Allow `_doc` as a type. (#27816)
Allowing `_doc` as a type name makes the transition to 7.0 smoother, since the index APIs will be `PUT index/_doc/id` and `POST index/_doc`. This change also moves most of the documentation to `_doc` as a type name.

Closes #27750
Closes #27751
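For illustration (this sketch is not part of the diff below): with `_doc` accepted as a type name, the two request shapes mentioned above look roughly as follows, where the index name `index` and the document bodies are hypothetical placeholders.

```
# index a document under the _doc type with an explicit id
PUT index/_doc/1
{"field": "value"}

# index a document under the _doc type with an auto-generated id
POST index/_doc
{"field": "value"}
```

In the change itself, the new `MapperService.validateTypeName` accepts `_doc` while still rejecting any other type name that starts with `_`, and the tests are updated to use `_doc` where they previously used `doc`; `MapperServiceTests.testTypeValidation` below exercises exactly this behavior.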
parent bb14b8f7c5
commit 1b660821a2

@@ -293,7 +293,9 @@ public class MetaDataMappingService extends AbstractComponent {
         }
         assert mappingType != null;
 
-        if (!MapperService.DEFAULT_MAPPING.equals(mappingType) && mappingType.charAt(0) == '_') {
+        if (MapperService.DEFAULT_MAPPING.equals(mappingType) == false
+            && MapperService.SINGLE_MAPPING_NAME.equals(mappingType) == false
+            && mappingType.charAt(0) == '_') {
             throw new InvalidTypeNameException("Document mapping type name can't start with '_', found: [" + mappingType + "]");
         }
         MetaData.Builder builder = MetaData.builder(metaData);

@@ -90,6 +90,7 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
     }
 
     public static final String DEFAULT_MAPPING = "_default_";
+    public static final String SINGLE_MAPPING_NAME = "_doc";
     public static final Setting<Long> INDEX_MAPPING_NESTED_FIELDS_LIMIT_SETTING =
         Setting.longSetting("index.mapping.nested_fields.limit", 50L, 0, Property.Dynamic, Property.IndexScope);
     // maximum allowed number of nested json objects across all fields in a single document
@@ -338,6 +339,27 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return internalMerge(defaultMapper, defaultMappingSource, documentMappers, reason, updateAllTypes);
     }
 
+    static void validateTypeName(String type) {
+        if (type.length() == 0) {
+            throw new InvalidTypeNameException("mapping type name is empty");
+        }
+        if (type.length() > 255) {
+            throw new InvalidTypeNameException("mapping type name [" + type + "] is too long; limit is length 255 but was [" + type.length() + "]");
+        }
+        if (type.charAt(0) == '_' && SINGLE_MAPPING_NAME.equals(type) == false) {
+            throw new InvalidTypeNameException("mapping type name [" + type + "] can't start with '_' unless it is called [" + SINGLE_MAPPING_NAME + "]");
+        }
+        if (type.contains("#")) {
+            throw new InvalidTypeNameException("mapping type name [" + type + "] should not include '#' in it");
+        }
+        if (type.contains(",")) {
+            throw new InvalidTypeNameException("mapping type name [" + type + "] should not include ',' in it");
+        }
+        if (type.charAt(0) == '.') {
+            throw new IllegalArgumentException("mapping type name [" + type + "] must not start with a '.'");
+        }
+    }
+
     private synchronized Map<String, DocumentMapper> internalMerge(@Nullable DocumentMapper defaultMapper, @Nullable String defaultMappingSource,
                                                                    List<DocumentMapper> documentMappers, MergeReason reason, boolean updateAllTypes) {
         boolean hasNested = this.hasNested;
@@ -361,27 +383,10 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
 
         for (DocumentMapper mapper : documentMappers) {
             // check naming
-            if (mapper.type().length() == 0) {
-                throw new InvalidTypeNameException("mapping type name is empty");
-            }
-            if (mapper.type().length() > 255) {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] is too long; limit is length 255 but was [" + mapper.type().length() + "]");
-            }
-            if (mapper.type().charAt(0) == '_') {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] can't start with '_'");
-            }
-            if (mapper.type().contains("#")) {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include '#' in it");
-            }
-            if (mapper.type().contains(",")) {
-                throw new InvalidTypeNameException("mapping type name [" + mapper.type() + "] should not include ',' in it");
-            }
+            validateTypeName(mapper.type());
             if (mapper.type().equals(mapper.parentFieldMapper().type())) {
                 throw new IllegalArgumentException("The [_parent.type] option can't point to the same type");
             }
-            if (typeNameStartsWithIllegalDot(mapper)) {
-                throw new IllegalArgumentException("mapping type name [" + mapper.type() + "] must not start with a '.'");
-            }
 
             // compute the merged DocumentMapper
             DocumentMapper oldMapper = mappers.get(mapper.type());
@@ -519,10 +524,6 @@ public class MapperService extends AbstractIndexComponent implements Closeable {
         return true;
     }
 
-    private boolean typeNameStartsWithIllegalDot(DocumentMapper mapper) {
-        return mapper.type().startsWith(".");
-    }
-
     private boolean assertSerialization(DocumentMapper mapper) {
         // capture the source now, it may change due to concurrent parsing
         final CompressedXContent mappingSource = mapper.mappingSource();

@@ -241,11 +241,11 @@ public class MetaDataTests extends ESTestCase {
                 .put(IndexMetaData.builder("index1")
                         .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
-                        .putMapping("doc", FIND_MAPPINGS_TEST_ITEM))
+                        .putMapping("_doc", FIND_MAPPINGS_TEST_ITEM))
                 .put(IndexMetaData.builder("index2")
                         .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
-                        .putMapping("doc", FIND_MAPPINGS_TEST_ITEM)).build();
+                        .putMapping("_doc", FIND_MAPPINGS_TEST_ITEM)).build();
 
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(Strings.EMPTY_ARRAY,
@@ -266,7 +266,7 @@ public class MetaDataTests extends ESTestCase {
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(
                     new String[]{"index1", "index2"},
-                    new String[]{randomBoolean() ? "doc" : "_all"}, MapperPlugin.NOOP_FIELD_FILTER);
+                    new String[]{randomBoolean() ? "_doc" : "_all"}, MapperPlugin.NOOP_FIELD_FILTER);
             assertEquals(2, mappings.size());
             assertIndexMappingsNotFiltered(mappings, "index1");
             assertIndexMappingsNotFiltered(mappings, "index2");
@@ -274,7 +274,7 @@ public class MetaDataTests extends ESTestCase {
     }
 
     public void testFindMappingsNoOpFilters() throws IOException {
-        MappingMetaData originalMappingMetaData = new MappingMetaData("doc",
+        MappingMetaData originalMappingMetaData = new MappingMetaData("_doc",
                 XContentHelper.convertToMap(JsonXContent.jsonXContent, FIND_MAPPINGS_TEST_ITEM, true));
 
         MetaData metaData = MetaData.builder()
@@ -287,28 +287,28 @@ public class MetaDataTests extends ESTestCase {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(new String[]{"index1"},
                     randomBoolean() ? Strings.EMPTY_ARRAY : new String[]{"_all"}, MapperPlugin.NOOP_FIELD_FILTER);
             ImmutableOpenMap<String, MappingMetaData> index1 = mappings.get("index1");
-            MappingMetaData mappingMetaData = index1.get("doc");
+            MappingMetaData mappingMetaData = index1.get("_doc");
             assertSame(originalMappingMetaData, mappingMetaData);
         }
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(new String[]{"index1"},
                     randomBoolean() ? Strings.EMPTY_ARRAY : new String[]{"_all"}, index -> field -> randomBoolean());
             ImmutableOpenMap<String, MappingMetaData> index1 = mappings.get("index1");
-            MappingMetaData mappingMetaData = index1.get("doc");
+            MappingMetaData mappingMetaData = index1.get("_doc");
             assertNotSame(originalMappingMetaData, mappingMetaData);
         }
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(new String[]{"index1"},
-                    new String[]{"doc"}, MapperPlugin.NOOP_FIELD_FILTER);
+                    new String[]{"_doc"}, MapperPlugin.NOOP_FIELD_FILTER);
             ImmutableOpenMap<String, MappingMetaData> index1 = mappings.get("index1");
-            MappingMetaData mappingMetaData = index1.get("doc");
+            MappingMetaData mappingMetaData = index1.get("_doc");
             assertSame(originalMappingMetaData, mappingMetaData);
         }
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(new String[]{"index1"},
-                    new String[]{"doc"}, index -> field -> randomBoolean());
+                    new String[]{"_doc"}, index -> field -> randomBoolean());
             ImmutableOpenMap<String, MappingMetaData> index1 = mappings.get("index1");
-            MappingMetaData mappingMetaData = index1.get("doc");
+            MappingMetaData mappingMetaData = index1.get("_doc");
             assertNotSame(originalMappingMetaData, mappingMetaData);
         }
     }
@@ -318,7 +318,7 @@ public class MetaDataTests extends ESTestCase {
         String mapping = FIND_MAPPINGS_TEST_ITEM;
         if (randomBoolean()) {
             Map<String, Object> stringObjectMap = XContentHelper.convertToMap(JsonXContent.jsonXContent, FIND_MAPPINGS_TEST_ITEM, false);
-            Map<String, Object> doc = (Map<String, Object>)stringObjectMap.get("doc");
+            Map<String, Object> doc = (Map<String, Object>)stringObjectMap.get("_doc");
             try (XContentBuilder builder = JsonXContent.contentBuilder()) {
                 builder.map(doc);
                 mapping = builder.string();
@@ -329,20 +329,20 @@ public class MetaDataTests extends ESTestCase {
                 .put(IndexMetaData.builder("index1")
                         .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
-                        .putMapping("doc", mapping))
+                        .putMapping("_doc", mapping))
                 .put(IndexMetaData.builder("index2")
                         .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
-                        .putMapping("doc", mapping))
+                        .putMapping("_doc", mapping))
                 .put(IndexMetaData.builder("index3")
                         .settings(Settings.builder().put(IndexMetaData.SETTING_VERSION_CREATED, Version.CURRENT)
                                 .put(IndexMetaData.SETTING_NUMBER_OF_SHARDS, 1).put(IndexMetaData.SETTING_NUMBER_OF_REPLICAS, 0))
-                        .putMapping("doc", mapping)).build();
+                        .putMapping("_doc", mapping)).build();
 
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(
                     new String[]{"index1", "index2", "index3"},
-                    new String[]{"doc"}, index -> {
+                    new String[]{"_doc"}, index -> {
                         if (index.equals("index1")) {
                             return field -> field.startsWith("name.") == false && field.startsWith("properties.key.") == false
                                     && field.equals("age") == false && field.equals("address.location") == false;
@@ -362,7 +362,7 @@ public class MetaDataTests extends ESTestCase {
             assertNotNull(index1Mappings);
 
             assertEquals(1, index1Mappings.size());
-            MappingMetaData docMapping = index1Mappings.get("doc");
+            MappingMetaData docMapping = index1Mappings.get("_doc");
             assertNotNull(docMapping);
 
             Map<String, Object> sourceAsMap = docMapping.getSourceAsMap();
@@ -406,13 +406,13 @@ public class MetaDataTests extends ESTestCase {
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(
                     new String[]{"index1", "index2" , "index3"},
-                    new String[]{"doc"}, index -> field -> (index.equals("index3") && field.endsWith("keyword")));
+                    new String[]{"_doc"}, index -> field -> (index.equals("index3") && field.endsWith("keyword")));
 
             assertIndexMappingsNoFields(mappings, "index1");
             assertIndexMappingsNoFields(mappings, "index2");
             ImmutableOpenMap<String, MappingMetaData> index3 = mappings.get("index3");
             assertEquals(1, index3.size());
-            MappingMetaData mappingMetaData = index3.get("doc");
+            MappingMetaData mappingMetaData = index3.get("_doc");
             Map<String, Object> sourceAsMap = mappingMetaData.getSourceAsMap();
             assertEquals(3, sourceAsMap.size());
             assertTrue(sourceAsMap.containsKey("_routing"));
@@ -442,7 +442,7 @@ public class MetaDataTests extends ESTestCase {
         {
             ImmutableOpenMap<String, ImmutableOpenMap<String, MappingMetaData>> mappings = metaData.findMappings(
                     new String[]{"index1", "index2" , "index3"},
-                    new String[]{"doc"}, index -> field -> (index.equals("index2")));
+                    new String[]{"_doc"}, index -> field -> (index.equals("index2")));
 
             assertIndexMappingsNoFields(mappings, "index1");
             assertIndexMappingsNoFields(mappings, "index3");
@@ -456,7 +456,7 @@ public class MetaDataTests extends ESTestCase {
         ImmutableOpenMap<String, MappingMetaData> indexMappings = mappings.get(index);
         assertNotNull(indexMappings);
         assertEquals(1, indexMappings.size());
-        MappingMetaData docMapping = indexMappings.get("doc");
+        MappingMetaData docMapping = indexMappings.get("_doc");
         assertNotNull(docMapping);
         Map<String, Object> sourceAsMap = docMapping.getSourceAsMap();
         assertEquals(3, sourceAsMap.size());
@@ -473,7 +473,7 @@ public class MetaDataTests extends ESTestCase {
         assertNotNull(indexMappings);
 
         assertEquals(1, indexMappings.size());
-        MappingMetaData docMapping = indexMappings.get("doc");
+        MappingMetaData docMapping = indexMappings.get("_doc");
         assertNotNull(docMapping);
 
         Map<String, Object> sourceAsMap = docMapping.getSourceAsMap();
@@ -540,7 +540,7 @@ public class MetaDataTests extends ESTestCase {
     }
 
     private static final String FIND_MAPPINGS_TEST_ITEM = "{\n" +
-            " \"doc\": {\n" +
+            " \"_doc\": {\n" +
            " \"_routing\": {\n" +
            " \"required\":true\n" +
            " }," +

@@ -380,7 +380,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
         final String node_2 = internalCluster().startDataOnlyNode();
         List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
         for (int i = 0; i < 100; i++) {
-            indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("doc")
+            indexRequestBuilderList.add(client().prepareIndex().setIndex("test").setType("_doc")
                 .setSource("{\"int_field\":1}", XContentType.JSON));
         }
         indexRandom(true, indexRequestBuilderList);
@@ -398,7 +398,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
 
         ensureStableCluster(2);
         assertAcked(prepareCreate("index").setSettings(Settings.builder().put("index.number_of_replicas", 0)));
-        index("index", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+        index("index", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
         ensureGreen();
 
         internalCluster().restartNode(masterNode, new InternalTestCluster.RestartCallback() {
@@ -409,7 +409,7 @@ public class ClusterDisruptionIT extends AbstractDisruptionTestCase {
         });
 
         ensureGreen("index");
-        assertTrue(client().prepareGet("index", "doc", "1").get().isExists());
+        assertTrue(client().prepareGet("index", "_doc", "1").get().isExists());
     }
 
     /**

@@ -49,7 +49,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
         String masterNode = internalCluster().startMasterOnlyNode(Settings.EMPTY);
         String dataNode = internalCluster().startDataOnlyNode(Settings.EMPTY);
         assertAcked(prepareCreate("test").setSettings(Settings.builder().put("index.number_of_replicas", 0)));
-        index("test", "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+        index("test", "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
         ensureGreen("test");
         assertIndexInMetaState(dataNode, "test");
         assertIndexInMetaState(masterNode, "test");
@@ -64,7 +64,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
 
         String index = "index";
         assertAcked(prepareCreate(index).setSettings(Settings.builder().put("index.number_of_replicas", 0).put(IndexMetaData.INDEX_ROUTING_INCLUDE_GROUP_SETTING.getKey() + "_name", node1)));
-        index(index, "doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
+        index(index, "_doc", "1", jsonBuilder().startObject().field("text", "some text").endObject());
         ensureGreen();
         assertIndexInMetaState(node1, index);
         Index resolveIndex = resolveIndex(index);
@@ -99,7 +99,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
         assertThat(clusterStateResponse.getState().getMetaData().index(index).getState().name(), equalTo(IndexMetaData.State.CLOSE.name()));
 
         // update the mapping. this should cause the new meta data to be written although index is closed
-        client().admin().indices().preparePutMapping(index).setType("doc").setSource(jsonBuilder().startObject()
+        client().admin().indices().preparePutMapping(index).setType("_doc").setSource(jsonBuilder().startObject()
             .startObject("properties")
                 .startObject("integer_field")
                     .field("type", "integer")
@@ -107,12 +107,12 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
                 .endObject()
             .endObject()).get();
 
-        GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes("doc").get();
-        assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get(index).get("doc").getSourceAsMap().get("properties"))).get("integer_field"));
+        GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes("_doc").get();
+        assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get(index).get("_doc").getSourceAsMap().get("properties"))).get("integer_field"));
 
         // make sure it was also written on red node although index is closed
         ImmutableOpenMap<String, IndexMetaData> indicesMetaData = getIndicesMetaDataOnNode(dataNode);
-        assertNotNull(((LinkedHashMap) (indicesMetaData.get(index).getMappings().get("doc").getSourceAsMap().get("properties"))).get("integer_field"));
+        assertNotNull(((LinkedHashMap) (indicesMetaData.get(index).getMappings().get("_doc").getSourceAsMap().get("properties"))).get("integer_field"));
         assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.CLOSE));
 
         /* Try the same and see if this also works if node was just restarted.
@@ -124,7 +124,7 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
         * what we write. This is why we explicitly test for it.
         */
         internalCluster().restartNode(dataNode, new RestartCallback());
-        client().admin().indices().preparePutMapping(index).setType("doc").setSource(jsonBuilder().startObject()
+        client().admin().indices().preparePutMapping(index).setType("_doc").setSource(jsonBuilder().startObject()
             .startObject("properties")
                 .startObject("float_field")
                     .field("type", "float")
@@ -132,12 +132,12 @@ public class MetaDataWriteDataNodesIT extends ESIntegTestCase {
                 .endObject()
             .endObject()).get();
 
-        getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes("doc").get();
-        assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get(index).get("doc").getSourceAsMap().get("properties"))).get("float_field"));
+        getMappingsResponse = client().admin().indices().prepareGetMappings(index).addTypes("_doc").get();
+        assertNotNull(((LinkedHashMap) (getMappingsResponse.getMappings().get(index).get("_doc").getSourceAsMap().get("properties"))).get("float_field"));
 
         // make sure it was also written on red node although index is closed
         indicesMetaData = getIndicesMetaDataOnNode(dataNode);
-        assertNotNull(((LinkedHashMap) (indicesMetaData.get(index).getMappings().get("doc").getSourceAsMap().get("properties"))).get("float_field"));
+        assertNotNull(((LinkedHashMap) (indicesMetaData.get(index).getMappings().get("_doc").getSourceAsMap().get("properties"))).get("float_field"));
         assertThat(indicesMetaData.get(index).getState(), equalTo(IndexMetaData.State.CLOSE));
 
         // finally check that meta data is also written of index opened again

@@ -577,17 +577,17 @@ public class GetActionIT extends ESIntegTestCase {
 
     public void testGetFieldsMetaDataWithRouting() throws Exception {
         assertAcked(prepareCreate("test")
-                .addMapping("doc", "field1", "type=keyword,store=true")
+                .addMapping("_doc", "field1", "type=keyword,store=true")
                 .addAlias(new Alias("alias"))
                 .setSettings(Settings.builder().put("index.refresh_interval", -1).put("index.version.created", Version.V_5_6_0.id)));
         // multi types in 5.6
 
-        client().prepareIndex("test", "doc", "1")
+        client().prepareIndex("test", "_doc", "1")
                 .setRouting("1")
                 .setSource(jsonBuilder().startObject().field("field1", "value").endObject())
                 .get();
 
-        GetResponse getResponse = client().prepareGet(indexOrAlias(), "doc", "1")
+        GetResponse getResponse = client().prepareGet(indexOrAlias(), "_doc", "1")
                 .setRouting("1")
                 .setStoredFields("field1")
                 .get();
@@ -599,7 +599,7 @@ public class GetActionIT extends ESIntegTestCase {
 
         flush();
 
-        getResponse = client().prepareGet(indexOrAlias(), "doc", "1")
+        getResponse = client().prepareGet(indexOrAlias(), "_doc", "1")
                 .setStoredFields("field1")
                 .setRouting("1")
                 .get();
@@ -778,7 +778,7 @@ public class GetActionIT extends ESIntegTestCase {
                 " \"refresh_interval\": \"-1\"\n" +
                 " },\n" +
                 " \"mappings\": {\n" +
-                " \"doc\": {\n" +
+                " \"_doc\": {\n" +
                 " \"properties\": {\n" +
                 " \"suggest\": {\n" +
                 " \"type\": \"completion\"\n" +
@@ -798,16 +798,16 @@ public class GetActionIT extends ESIntegTestCase {
                 " }\n" +
                 "}";
 
-        index("test", "doc", "1", doc);
+        index("test", "_doc", "1", doc);
         String[] fieldsList = {"suggest"};
         // before refresh - document is only in translog
-        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsAlwaysNull(indexOrAlias(), "_doc", "1", fieldsList);
         refresh();
         //after refresh - document is in translog and also indexed
-        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsAlwaysNull(indexOrAlias(), "_doc", "1", fieldsList);
         flush();
         //after flush - document is in not anymore translog - only indexed
-        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsAlwaysNull(indexOrAlias(), "_doc", "1", fieldsList);
     }
 
     public void testUngeneratedFieldsThatAreAlwaysStored() throws IOException {
@@ -821,17 +821,17 @@ public class GetActionIT extends ESIntegTestCase {
                 .addAlias(new Alias("alias")).setSource(createIndexSource, XContentType.JSON));
         ensureGreen();
 
-        client().prepareIndex("test", "doc", "1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get();
+        client().prepareIndex("test", "_doc", "1").setRouting("routingValue").setId("1").setSource("{}", XContentType.JSON).get();
 
         String[] fieldsList = {"_routing"};
         // before refresh - document is only in translog
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "routingValue");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "routingValue");
         refresh();
         //after refresh - document is in translog and also indexed
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "routingValue");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "routingValue");
         flush();
         //after flush - document is in not anymore translog - only indexed
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "routingValue");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "routingValue");
     }
 
     public void testUngeneratedFieldsNotPartOfSourceStored() throws IOException {
@@ -847,41 +847,41 @@ public class GetActionIT extends ESIntegTestCase {
         String doc = "{\n" +
             " \"text\": \"some text.\"\n" +
             "}\n";
-        client().prepareIndex("test", "doc").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get();
+        client().prepareIndex("test", "_doc").setId("1").setSource(doc, XContentType.JSON).setRouting("1").get();
         String[] fieldsList = {"_routing"};
         // before refresh - document is only in translog
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "1");
         refresh();
         //after refresh - document is in translog and also indexed
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "1");
         flush();
         //after flush - document is in not anymore translog - only indexed
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList, "1");
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList, "1");
     }
 
     public void testGeneratedStringFieldsUnstored() throws IOException {
         indexSingleDocumentWithStringFieldsGeneratedFromText(false, randomBoolean());
         String[] fieldsList = {"_field_names"};
         // before refresh - document is only in translog
-        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsAlwaysNull(indexOrAlias(), "_doc", "1", fieldsList);
         refresh();
         //after refresh - document is in translog and also indexed
-        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsAlwaysNull(indexOrAlias(), "_doc", "1", fieldsList);
         flush();
         //after flush - document is in not anymore translog - only indexed
-        assertGetFieldsAlwaysNull(indexOrAlias(), "doc", "1", fieldsList);
+        assertGetFieldsAlwaysNull(indexOrAlias(), "_doc", "1", fieldsList);
     }
 
     public void testGeneratedStringFieldsStored() throws IOException {
         indexSingleDocumentWithStringFieldsGeneratedFromText(true, randomBoolean());
         String[] fieldsList = {"text1", "text2"};
         String[] alwaysNotStoredFieldsList = {"_field_names"};
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
-        assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList);
+        assertGetFieldsNull(indexOrAlias(), "_doc", "1", alwaysNotStoredFieldsList);
         flush();
         //after flush - document is in not anymore translog - only indexed
-        assertGetFieldsAlwaysWorks(indexOrAlias(), "doc", "1", fieldsList);
-        assertGetFieldsNull(indexOrAlias(), "doc", "1", alwaysNotStoredFieldsList);
+        assertGetFieldsAlwaysWorks(indexOrAlias(), "_doc", "1", fieldsList);
+        assertGetFieldsNull(indexOrAlias(), "_doc", "1", alwaysNotStoredFieldsList);
     }
 
     void indexSingleDocumentWithStringFieldsGeneratedFromText(boolean stored, boolean sourceEnabled) {
@@ -893,7 +893,7 @@ public class GetActionIT extends ESIntegTestCase {
                 " \"refresh_interval\": \"-1\"\n" +
                 " },\n" +
                 " \"mappings\": {\n" +
-                " \"doc\": {\n" +
+                " \"_doc\": {\n" +
                 " \"_source\" : {\"enabled\" : " + sourceEnabled + "}," +
                 " \"properties\": {\n" +
                 " \"text1\": {\n" +
@@ -915,7 +915,7 @@ public class GetActionIT extends ESIntegTestCase {
                 " \"text1\": \"some text.\"\n," +
                 " \"text2\": \"more text.\"\n" +
                 "}\n";
-        index("test", "doc", "1", doc);
+        index("test", "_doc", "1", doc);
     }
 
     private void assertGetFieldsAlwaysWorks(String index, String type, String docId, String[] fields) {

@@ -32,9 +32,9 @@ public class AllFieldMapperTests extends ESSingleNodeTestCase {
         IndexService indexService = createIndex("test", Settings.builder()
                 .put("index.analysis.analyzer.default_search.type", "custom")
                 .put("index.analysis.analyzer.default_search.tokenizer", "standard").build());
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("doc").endObject().endObject().string();
-        indexService.mapperService().merge("doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
-        assertEquals(mapping, indexService.mapperService().documentMapper("doc").mapping().toString());
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc").endObject().endObject().string();
+        indexService.mapperService().merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, false);
+        assertEquals(mapping, indexService.mapperService().documentMapper("_doc").mapping().toString());
     }
 
 }

@@ -39,13 +39,13 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase {
     public void testDynamicTemplateCopyTo() throws Exception {
         assertAcked(
             client().admin().indices().prepareCreate("test-idx")
-                .addMapping("doc", createDynamicTemplateMapping())
+                .addMapping("_doc", createDynamicTemplateMapping())
         );
 
         int recordCount = between(1, 200);
 
         for (int i = 0; i < recordCount * 2; i++) {
-            client().prepareIndex("test-idx", "doc", Integer.toString(i))
+            client().prepareIndex("test-idx", "_doc", Integer.toString(i))
                 .setSource("test_field", "test " + i, "even", i % 2 == 0)
                 .get();
         }
@@ -69,7 +69,7 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase {
     }
 
     public void testDynamicObjectCopyTo() throws Exception {
-        String mapping = jsonBuilder().startObject().startObject("doc").startObject("properties")
+        String mapping = jsonBuilder().startObject().startObject("_doc").startObject("properties")
             .startObject("foo")
                 .field("type", "text")
                 .field("copy_to", "root.top.child")
@@ -77,9 +77,9 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase {
             .endObject().endObject().endObject().string();
         assertAcked(
             client().admin().indices().prepareCreate("test-idx")
-                .addMapping("doc", mapping, XContentType.JSON)
+                .addMapping("_doc", mapping, XContentType.JSON)
         );
-        client().prepareIndex("test-idx", "doc", "1")
+        client().prepareIndex("test-idx", "_doc", "1")
             .setSource("foo", "bar")
             .get();
         client().admin().indices().prepareRefresh("test-idx").execute().actionGet();
@@ -89,7 +89,7 @@ public class CopyToMapperIntegrationIT extends ESIntegTestCase {
     }
 
     private XContentBuilder createDynamicTemplateMapping() throws IOException {
-        return XContentFactory.jsonBuilder().startObject().startObject("doc")
+        return XContentFactory.jsonBuilder().startObject().startObject("_doc")
             .startArray("dynamic_templates")
 
                 .startObject().startObject("template_raw")

@@ -419,7 +419,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
     public void testCopyToChildNested() throws Exception {
         IndexService indexService = createIndex("test");
         XContentBuilder rootToNestedMapping = jsonBuilder().startObject()
-            .startObject("doc")
+            .startObject("_doc")
                 .startObject("properties")
                     .startObject("source")
                         .field("type", "long")
@@ -437,12 +437,12 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
                 .endObject()
             .endObject();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> indexService.mapperService().merge("doc", new CompressedXContent(rootToNestedMapping.bytes()),
+                () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()),
                         MergeReason.MAPPING_UPDATE, false));
         assertThat(e.getMessage(), Matchers.startsWith("Illegal combination of [copy_to] and [nested] mappings"));
 
         XContentBuilder nestedToNestedMapping = jsonBuilder().startObject()
-            .startObject("doc")
+            .startObject("_doc")
                 .startObject("properties")
                     .startObject("n1")
                         .field("type", "nested")
@@ -465,14 +465,14 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
                 .endObject()
             .endObject();
         e = expectThrows(IllegalArgumentException.class,
-                () -> indexService.mapperService().merge("doc", new CompressedXContent(nestedToNestedMapping.bytes()),
+                () -> indexService.mapperService().merge("_doc", new CompressedXContent(nestedToNestedMapping.bytes()),
                         MergeReason.MAPPING_UPDATE, false));
     }
 
     public void testCopyToSiblingNested() throws Exception {
         IndexService indexService = createIndex("test");
         XContentBuilder rootToNestedMapping = jsonBuilder().startObject()
-            .startObject("doc")
+            .startObject("_doc")
                 .startObject("properties")
                     .startObject("n1")
                         .field("type", "nested")
@@ -495,7 +495,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
                 .endObject()
             .endObject();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> indexService.mapperService().merge("doc", new CompressedXContent(rootToNestedMapping.bytes()),
+                () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()),
                         MergeReason.MAPPING_UPDATE, false));
         assertThat(e.getMessage(), Matchers.startsWith("Illegal combination of [copy_to] and [nested] mappings"));
     }
@@ -503,7 +503,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
     public void testCopyToObject() throws Exception {
         IndexService indexService = createIndex("test");
         XContentBuilder rootToNestedMapping = jsonBuilder().startObject()
-            .startObject("doc")
+            .startObject("_doc")
                 .startObject("properties")
                     .startObject("source")
                         .field("type", "long")
@@ -516,7 +516,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
                 .endObject()
             .endObject();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> indexService.mapperService().merge("doc", new CompressedXContent(rootToNestedMapping.bytes()),
+                () -> indexService.mapperService().merge("_doc", new CompressedXContent(rootToNestedMapping.bytes()),
                         MergeReason.MAPPING_UPDATE, false));
         assertThat(e.getMessage(), Matchers.startsWith("Cannot copy to field [target] since it is mapped as an object"));
     }
@@ -569,7 +569,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
     }
 
     public void testCopyToMultiField() throws Exception {
-        String mapping = jsonBuilder().startObject().startObject("doc")
+        String mapping = jsonBuilder().startObject().startObject("_doc")
                 .startObject("properties")
                     .startObject("my_field")
                         .field("type", "keyword")
@@ -585,12 +585,12 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
 
         MapperService mapperService = createIndex("test").mapperService();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> mapperService.merge("doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()));
+                () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()));
         assertEquals("[copy_to] may not be used to copy to a multi-field: [my_field.bar]", e.getMessage());
     }
 
     public void testNestedCopyTo() throws Exception {
-        String mapping = jsonBuilder().startObject().startObject("doc")
+        String mapping = jsonBuilder().startObject().startObject("_doc")
                 .startObject("properties")
                     .startObject("n")
                         .field("type", "nested")
@@ -608,11 +608,11 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
                 .endObject().endObject().string();
 
         MapperService mapperService = createIndex("test").mapperService();
-        mapperService.merge("doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); // no exception
+        mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()); // no exception
     }
 
     public void testNestedCopyToMultiField() throws Exception {
-        String mapping = jsonBuilder().startObject().startObject("doc")
+        String mapping = jsonBuilder().startObject().startObject("_doc")
                 .startObject("properties")
                     .startObject("n")
                         .field("type", "nested")
@@ -633,12 +633,12 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
 
         MapperService mapperService = createIndex("test").mapperService();
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
-                () -> mapperService.merge("doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()));
+                () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()));
         assertEquals("[copy_to] may not be used to copy to a multi-field: [n.my_field.bar]", e.getMessage());
     }
 
     public void testCopyFromMultiField() throws Exception {
-        String mapping = jsonBuilder().startObject().startObject("doc")
+        String mapping = jsonBuilder().startObject().startObject("_doc")
                 .startObject("properties")
                     .startObject("my_field")
                         .field("type", "keyword")
@@ -654,7 +654,7 @@ public class CopyToMapperTests extends ESSingleNodeTestCase {
 
         MapperService mapperService = createIndex("test").mapperService();
         MapperParsingException e = expectThrows(MapperParsingException.class,
-                () -> mapperService.merge("doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()));
+                () -> mapperService.merge("_doc", new CompressedXContent(mapping), MergeReason.MAPPING_UPDATE, randomBoolean()));
         assertThat(e.getMessage(),
             Matchers.containsString("copy_to in multi fields is not allowed. Found the copy_to in field [bar] " +
                 "which is within a multi field."));

@@ -397,15 +397,15 @@ public class DateFieldMapperTests extends ESSingleNodeTestCase {
     }
 
     public void testMergeText() throws Exception {
-        String mapping = XContentFactory.jsonBuilder().startObject().startObject("doc")
+        String mapping = XContentFactory.jsonBuilder().startObject().startObject("_doc")
                 .startObject("properties").startObject("date").field("type", "date").endObject()
                 .endObject().endObject().endObject().string();
-        DocumentMapper mapper = indexService.mapperService().parse("doc", new CompressedXContent(mapping), false);
+        DocumentMapper mapper = indexService.mapperService().parse("_doc", new CompressedXContent(mapping), false);
 
-        String mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("doc")
+        String mappingUpdate = XContentFactory.jsonBuilder().startObject().startObject("_doc")
                 .startObject("properties").startObject("date").field("type", "text").endObject()
                 .endObject().endObject().endObject().string();
-        DocumentMapper update = indexService.mapperService().parse("doc", new CompressedXContent(mappingUpdate), false);
+        DocumentMapper update = indexService.mapperService().parse("_doc", new CompressedXContent(mappingUpdate), false);
 
         IllegalArgumentException e = expectThrows(IllegalArgumentException.class,
                 () -> mapper.merge(update.mapping(), randomBoolean()));

@@ -534,7 +534,7 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
     public void testMixTemplateMultiFieldAndMappingReuse() throws Exception {
         IndexService indexService = createIndex("test");
         XContentBuilder mappings1 = jsonBuilder().startObject()
-                .startObject("doc")
+                .startObject("_doc")
                     .startArray("dynamic_templates")
                         .startObject()
                             .startObject("template1")
@@ -551,21 +551,21 @@ public class DynamicMappingTests extends ESSingleNodeTestCase {
                         .endObject()
                     .endArray()
                 .endObject().endObject();
-        indexService.mapperService().merge("doc", new CompressedXContent(mappings1.bytes()),
+        indexService.mapperService().merge("_doc", new CompressedXContent(mappings1.bytes()),
                 MapperService.MergeReason.MAPPING_UPDATE, false);
 
         XContentBuilder json = XContentFactory.jsonBuilder().startObject()
                 .field("field", "foo")
                 .endObject();
-        SourceToParse source = SourceToParse.source("test", "doc", "1", json.bytes(), json.contentType());
-        DocumentMapper mapper = indexService.mapperService().documentMapper("doc");
+        SourceToParse source = SourceToParse.source("test", "_doc", "1", json.bytes(), json.contentType());
+        DocumentMapper mapper = indexService.mapperService().documentMapper("_doc");
         assertNull(mapper.mappers().getMapper("field.raw"));
         ParsedDocument parsed = mapper.parse(source);
         assertNotNull(parsed.dynamicMappingsUpdate());
 
-        indexService.mapperService().merge("doc", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()),
+        indexService.mapperService().merge("_doc", new CompressedXContent(parsed.dynamicMappingsUpdate().toString()),
                 MapperService.MergeReason.MAPPING_UPDATE, false);
-        mapper = indexService.mapperService().documentMapper("doc");
+        mapper = indexService.mapperService().documentMapper("_doc");
         assertNotNull(mapper.mappers().getMapper("field.raw"));
         parsed = mapper.parse(source);
         assertNull(parsed.dynamicMappingsUpdate());

@@ -131,8 +131,8 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase {
     }
 
     public void testExternalValuesWithMultifield() throws Exception {
-        prepareCreate("test-idx").addMapping("doc",
-            XContentFactory.jsonBuilder().startObject().startObject("doc").startObject("properties")
+        prepareCreate("test-idx").addMapping("_doc",
+            XContentFactory.jsonBuilder().startObject().startObject("_doc").startObject("properties")
                 .startObject("f")
                     .field("type", ExternalMapperPlugin.EXTERNAL_UPPER)
                     .startObject("fields")
@@ -150,7 +150,7 @@ public class ExternalValuesMapperIntegrationIT extends ESIntegTestCase {
                 .endObject()
             .endObject().endObject().endObject()).execute().get();
 
-        index("test-idx", "doc", "1", "f", "This is my text");
+        index("test-idx", "_doc", "1", "f", "This is my text");
         refresh();
 
         SearchResponse response = client().prepareSearch("test-idx")

@@ -59,7 +59,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
         assertAcked(client().admin().indices().prepareCreate("index1"));
         assertAcked(client().admin().indices().prepareCreate("filtered"));
         assertAcked(client().admin().indices().preparePutMapping("index1", "filtered")
-                .setType("doc").setSource(TEST_ITEM, XContentType.JSON));
+                .setType("_doc").setSource(TEST_ITEM, XContentType.JSON));
     }
 
     public void testGetMappings() {
@@ -83,7 +83,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
         //as the one coming from a filtered index with same mappings
         GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("filtered").get();
         ImmutableOpenMap<String, MappingMetaData> filtered = getMappingsResponse.getMappings().get("filtered");
-        assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", filtered.get("doc").getSourceAsMap()));
+        assertAcked(client().admin().indices().prepareCreate("test").addMapping("_doc", filtered.get("_doc").getSourceAsMap()));
         GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setFields("*").get();
         assertEquals(1, response.mappings().size());
         assertFieldMappings(response.mappings().get("test"), FILTERED_FLAT_FIELDS);
@@ -98,7 +98,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
         //as the one coming from a filtered index with same mappings
         GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("filtered").get();
         ImmutableOpenMap<String, MappingMetaData> filteredMapping = getMappingsResponse.getMappings().get("filtered");
-        assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", filteredMapping.get("doc").getSourceAsMap()));
+        assertAcked(client().admin().indices().prepareCreate("test").addMapping("_doc", filteredMapping.get("_doc").getSourceAsMap()));
         FieldCapabilitiesResponse test = client().fieldCaps(new FieldCapabilitiesRequest().fields("*").indices("test")).actionGet();
         assertFieldCaps(test, FILTERED_FLAT_FIELDS);
     }
@@ -120,7 +120,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
     private static void assertFieldMappings(Map<String, Map<String, GetFieldMappingsResponse.FieldMappingMetaData>> mappings,
                                             String[] expectedFields) {
         assertEquals(1, mappings.size());
-        Map<String, GetFieldMappingsResponse.FieldMappingMetaData> fields = new HashMap<>(mappings.get("doc"));
+        Map<String, GetFieldMappingsResponse.FieldMappingMetaData> fields = new HashMap<>(mappings.get("_doc"));
         Set<String> builtInMetaDataFields = IndicesModule.getBuiltInMetaDataFields();
         for (String field : builtInMetaDataFields) {
             GetFieldMappingsResponse.FieldMappingMetaData fieldMappingMetaData = fields.remove(field);
@@ -138,12 +138,12 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
         assertNotFiltered(mappings.get("index1"));
         ImmutableOpenMap<String, MappingMetaData> filtered = mappings.get("filtered");
         assertFiltered(filtered);
-        assertMappingsAreValid(filtered.get("doc").getSourceAsMap());
+        assertMappingsAreValid(filtered.get("_doc").getSourceAsMap());
     }
 
     private void assertMappingsAreValid(Map<String, Object> sourceAsMap) {
         //check that the returned filtered mappings are still valid mappings by submitting them and retrieving them back
-        assertAcked(client().admin().indices().prepareCreate("test").addMapping("doc", sourceAsMap));
+        assertAcked(client().admin().indices().prepareCreate("test").addMapping("_doc", sourceAsMap));
         GetMappingsResponse testMappingsResponse = client().admin().indices().prepareGetMappings("test").get();
         assertEquals(1, testMappingsResponse.getMappings().size());
         //the mappings are returned unfiltered for this index, yet they are the same as the previous ones that were returned filtered
@@ -153,7 +153,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
     @SuppressWarnings("unchecked")
     private static void assertFiltered(ImmutableOpenMap<String, MappingMetaData> mappings) {
         assertEquals(1, mappings.size());
-        MappingMetaData mappingMetaData = mappings.get("doc");
+        MappingMetaData mappingMetaData = mappings.get("_doc");
         assertNotNull(mappingMetaData);
         Map<String, Object> sourceAsMap = mappingMetaData.getSourceAsMap();
         assertEquals(4, sourceAsMap.size());
@@ -200,7 +200,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
     @SuppressWarnings("unchecked")
     private static void assertNotFiltered(ImmutableOpenMap<String, MappingMetaData> mappings) {
         assertEquals(1, mappings.size());
-        MappingMetaData mappingMetaData = mappings.get("doc");
+        MappingMetaData mappingMetaData = mappings.get("_doc");
         assertNotNull(mappingMetaData);
         Map<String, Object> sourceAsMap = mappingMetaData.getSourceAsMap();
         assertEquals(4, sourceAsMap.size());
@@ -255,7 +255,7 @@ public class FieldFilterMapperPluginTests extends ESSingleNodeTestCase {
     };
 
     private static final String TEST_ITEM = "{\n" +
-            " \"doc\": {\n" +
+            " \"_doc\": {\n" +
            " \"_meta\": {\n" +
            " \"version\":0.19\n" +
            " }," +

@@ -29,6 +29,7 @@ import org.elasticsearch.index.IndexService;
 import org.elasticsearch.index.mapper.KeywordFieldMapper.KeywordFieldType;
 import org.elasticsearch.index.mapper.MapperService.MergeReason;
 import org.elasticsearch.index.mapper.NumberFieldMapper.NumberFieldType;
+import org.elasticsearch.indices.InvalidTypeNameException;
 import org.elasticsearch.plugins.Plugin;
 import org.elasticsearch.test.ESSingleNodeTestCase;
 import org.elasticsearch.test.InternalSettingsPlugin;
@@ -101,6 +102,16 @@ public class MapperServiceTests extends ESSingleNodeTestCase {
         assertEquals(new HashSet<>(Arrays.asList("type1", "type2")), mapperService.types());
     }
 
+    public void testTypeValidation() {
+        InvalidTypeNameException e = expectThrows(InvalidTypeNameException.class, () -> MapperService.validateTypeName("_type"));
+        assertEquals("mapping type name [_type] can't start with '_' unless it is called [_doc]", e.getMessage());
+
+        e = expectThrows(InvalidTypeNameException.class, () -> MapperService.validateTypeName("_document"));
+        assertEquals("mapping type name [_document] can't start with '_' unless it is called [_doc]", e.getMessage());
+
+        MapperService.validateTypeName("_doc"); // no exception
+    }
+
     public void testIndexIntoDefaultMapping() throws Throwable {
         // 1. test implicit index creation
         ExecutionException e = expectThrows(ExecutionException.class, () -> {

@@ -495,7 +495,7 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
     }
 
     public void testParentObjectMapperAreNested() throws Exception {
-        MapperService mapperService = createIndex("index1", Settings.EMPTY, "doc", jsonBuilder().startObject()
+        MapperService mapperService = createIndex("index1", Settings.EMPTY, "_doc", jsonBuilder().startObject()
             .startObject("properties")
                 .startObject("comments")
                     .field("type", "nested")
@@ -509,7 +509,7 @@ public class NestedObjectMapperTests extends ESSingleNodeTestCase {
         ObjectMapper objectMapper = mapperService.getObjectMapper("comments.messages");
         assertTrue(objectMapper.parentObjectMapperAreNested(mapperService));
 
-        mapperService = createIndex("index2", Settings.EMPTY, "doc", jsonBuilder().startObject()
+        mapperService = createIndex("index2", Settings.EMPTY, "_doc", jsonBuilder().startObject()
             .startObject("properties")
                 .startObject("comments")
                     .field("type", "object")

@@ -350,8 +350,8 @@ public class MatchQueryBuilderTests extends AbstractQueryTestCase<MatchQueryBuilder> {
 
     @Override
     protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
-        mapperService.merge("doc", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(
-            "doc",
+        mapperService.merge("_doc", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(
+            "_doc",
             "string_boost", "type=text,boost=4", "string_no_pos",
             "type=text,index_options=docs").string()
         ),

@@ -58,7 +58,7 @@ public class NestedQueryBuilderTests extends AbstractQueryTestCase<NestedQueryBuilder> {
 
     @Override
     protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
-        mapperService.merge("doc", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("doc",
+        mapperService.merge("_doc", new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef("_doc",
                 STRING_FIELD_NAME, "type=text",
                 INT_FIELD_NAME, "type=integer",
                 DOUBLE_FIELD_NAME, "type=double",

@@ -830,9 +830,9 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> {
     public void testDisabledFieldNamesField() throws Exception {
         assumeTrue("No types", getCurrentTypes().length > 0);
         QueryShardContext context = createShardContext();
-        context.getMapperService().merge("doc",
+        context.getMapperService().merge("_doc",
             new CompressedXContent(
-                PutMappingRequest.buildFromSimplifiedDef("doc",
+                PutMappingRequest.buildFromSimplifiedDef("_doc",
                     "foo", "type=text",
                     "_field_names", "enabled=false").string()),
             MapperService.MergeReason.MAPPING_UPDATE, true);
@@ -843,9 +843,9 @@ public class QueryStringQueryBuilderTests extends AbstractQueryTestCase<QueryStringQueryBuilder> {
             assertThat(query, equalTo(expected));
         } finally {
             // restore mappings as they were before
-            context.getMapperService().merge("doc",
+            context.getMapperService().merge("_doc",
                 new CompressedXContent(
-                    PutMappingRequest.buildFromSimplifiedDef("doc",
+                    PutMappingRequest.buildFromSimplifiedDef("_doc",
                         "foo", "type=text",
                         "_field_names", "enabled=true").string()),
                 MapperService.MergeReason.MAPPING_UPDATE, true);

@@ -73,7 +73,7 @@ public class TermsSetQueryBuilderTests extends AbstractQueryTestCase<TermsSetQueryBuilder> {
 
     @Override
     protected void initializeAdditionalMappings(MapperService mapperService) throws IOException {
-        String docType = "doc";
+        String docType = "_doc";
         mapperService.merge(docType, new CompressedXContent(PutMappingRequest.buildFromSimplifiedDef(docType,
                 "m_s_m", "type=long"
         ).string()), MapperService.MergeReason.MAPPING_UPDATE, false);

@@ -37,7 +37,7 @@ public class TypeQueryBuilderTests extends AbstractQueryTestCase<TypeQueryBuilder> {
 
     @Override
     protected TypeQueryBuilder doCreateTestQueryBuilder() {
-        return new TypeQueryBuilder("doc");
+        return new TypeQueryBuilder("_doc");
     }
 
     @Override

@@ -537,13 +537,13 @@ public class IndexShardIT extends ESSingleNodeTestCase {
             .setTransientSettings(Settings.builder().put("network.breaker.inflight_requests.overhead", 0.0)).get();
 
         // Generate a couple of segments
-        client().prepareIndex("test", "doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
+        client().prepareIndex("test", "_doc", "1").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
             .setRefreshPolicy(IMMEDIATE).get();
         // Use routing so 2 documents are guarenteed to be on the same shard
         String routing = randomAlphaOfLength(5);
-        client().prepareIndex("test", "doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
+        client().prepareIndex("test", "_doc", "2").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
            .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
-        client().prepareIndex("test", "doc", "3").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
+        client().prepareIndex("test", "_doc", "3").setSource("{\"foo\":\"" + randomAlphaOfLength(100) + "\"}", XContentType.JSON)
            .setRefreshPolicy(IMMEDIATE).setRouting(routing).get();
 
         checkAccountingBreaker();
@@ -80,7 +80,7 @@ public class TermVectorsServiceTests extends ESSingleNodeTestCase {
     public void testDocFreqs() throws IOException {
         XContentBuilder mapping = jsonBuilder()
             .startObject()
-                .startObject("doc")
+                .startObject("_doc")
                     .startObject("properties")
                         .startObject("text")
                             .field("type", "text")

@@ -92,18 +92,18 @@ public class TermVectorsServiceTests extends ESSingleNodeTestCase {
         Settings settings = Settings.builder()
             .put("number_of_shards", 1)
             .build();
-        createIndex("test", settings, "doc", mapping);
+        createIndex("test", settings, "_doc", mapping);
         ensureGreen();

         int max = between(3, 10);
         BulkRequestBuilder bulk = client().prepareBulk();
         for (int i = 0; i < max; i++) {
-            bulk.add(client().prepareIndex("test", "doc", Integer.toString(i))
+            bulk.add(client().prepareIndex("test", "_doc", Integer.toString(i))
                 .setSource("text", "the quick brown fox jumped over the lazy dog"));
         }
         bulk.get();

-        TermVectorsRequest request = new TermVectorsRequest("test", "doc", "0").termStatistics(true);
+        TermVectorsRequest request = new TermVectorsRequest("test", "_doc", "0").termStatistics(true);

         IndicesService indicesService = getInstanceFromNode(IndicesService.class);
         IndexService test = indicesService.indexService(resolveIndex("test"));
@@ -165,7 +165,7 @@ public class FlushIT extends ESIntegTestCase {
         @Override
         public void run() {
             while (stop.get() == false) {
-                client().prepareIndex().setIndex("test").setType("doc").setSource("{}", XContentType.JSON).get();
+                client().prepareIndex().setIndex("test").setType("_doc").setSource("{}", XContentType.JSON).get();
                 numDocs.incrementAndGet();
             }
         }
@@ -248,14 +248,14 @@ public class SimpleGetFieldMappingsIT extends ESIntegTestCase {

     public void testGetFieldMappingsWithBlocks() throws Exception {
         assertAcked(prepareCreate("test")
-            .addMapping("doc", getMappingForType("doc")));
+            .addMapping("_doc", getMappingForType("_doc")));

         for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE, SETTING_READ_ONLY)) {
             try {
                 enableIndexBlock("test", block);
-                GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("doc")
+                GetFieldMappingsResponse response = client().admin().indices().prepareGetFieldMappings("test").setTypes("_doc")
                     .setFields("field1", "obj.subfield").get();
-                assertThat(response.fieldMappings("test", "doc", "field1").fullName(), equalTo("field1"));
+                assertThat(response.fieldMappings("test", "_doc", "field1").fullName(), equalTo("field1"));
             } finally {
                 disableIndexBlock("test", block);
             }
@@ -156,7 +156,7 @@ public class SimpleGetMappingsIT extends ESIntegTestCase {

     public void testGetMappingsWithBlocks() throws IOException {
         client().admin().indices().prepareCreate("test")
-            .addMapping("doc", getMappingForType("doc"))
+            .addMapping("_doc", getMappingForType("_doc"))
             .execute().actionGet();
         ensureGreen();

@@ -111,19 +111,19 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
         Settings.builder()
             .put("index.number_of_shards", 1)
             .put("index.number_of_replicas", 0)
-        ).addMapping("doc", "{\"doc\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}", XContentType.JSON)
+        ).addMapping("_doc", "{\"_doc\":{\"properties\":{\"body\":{\"type\":\"text\"}}}}", XContentType.JSON)
         .execute().actionGet();
         client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();

-        PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+        PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("_doc")
             .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON)
             .execute().actionGet();

         assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

         GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
-        assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
-            equalTo("{\"doc\":{\"properties\":{\"body\":{\"type\":\"text\"},\"date\":{\"type\":\"integer\"}}}}"));
+        assertThat(getMappingsResponse.mappings().get("test").get("_doc").source().toString(),
+            equalTo("{\"_doc\":{\"properties\":{\"body\":{\"type\":\"text\"},\"date\":{\"type\":\"integer\"}}}}"));
     }

     public void testUpdateMappingWithoutTypeMultiObjects() {

@@ -135,15 +135,15 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
         ).execute().actionGet();
         client().admin().cluster().prepareHealth().setWaitForEvents(Priority.LANGUID).setWaitForGreenStatus().execute().actionGet();

-        PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("doc")
+        PutMappingResponse putMappingResponse = client().admin().indices().preparePutMapping("test").setType("_doc")
             .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON)
             .execute().actionGet();

         assertThat(putMappingResponse.isAcknowledged(), equalTo(true));

         GetMappingsResponse getMappingsResponse = client().admin().indices().prepareGetMappings("test").execute().actionGet();
-        assertThat(getMappingsResponse.mappings().get("test").get("doc").source().toString(),
-            equalTo("{\"doc\":{\"properties\":{\"date\":{\"type\":\"integer\"}}}}"));
+        assertThat(getMappingsResponse.mappings().get("test").get("_doc").source().toString(),
+            equalTo("{\"_doc\":{\"properties\":{\"date\":{\"type\":\"integer\"}}}}"));
     }

     public void testUpdateMappingWithConflicts() {

@@ -326,7 +326,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
         for (String block : Arrays.asList(SETTING_BLOCKS_READ, SETTING_BLOCKS_WRITE)) {
             try {
                 enableIndexBlock("test", block);
-                assertAcked(client().admin().indices().preparePutMapping("test").setType("doc")
+                assertAcked(client().admin().indices().preparePutMapping("test").setType("_doc")
                     .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON));
             } finally {
                 disableIndexBlock("test", block);

@@ -336,7 +336,7 @@ public class UpdateMappingIntegrationIT extends ESIntegTestCase {
         for (String block : Arrays.asList(SETTING_READ_ONLY, SETTING_BLOCKS_METADATA)) {
             try {
                 enableIndexBlock("test", block);
-                assertBlocked(client().admin().indices().preparePutMapping("test").setType("doc")
+                assertBlocked(client().admin().indices().preparePutMapping("test").setType("_doc")
                     .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON));
             } finally {
                 disableIndexBlock("test", block);
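These integration tests exercise mapping updates through the client; with the rename they put mappings on `_doc` directly. A hedged sketch of the call shape (the wrapper class, index name, and field are illustrative, not from this diff):

import org.elasticsearch.action.admin.indices.mapping.put.PutMappingResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.common.xcontent.XContentType;

class PutDocMappingSketch {
    // Add an integer "date" field to the "_doc" mapping of index "test".
    static boolean addDateField(Client client) {
        PutMappingResponse response = client.admin().indices().preparePutMapping("test")
            .setType("_doc")
            .setSource("{\"properties\":{\"date\":{\"type\":\"integer\"}}}", XContentType.JSON)
            .get();
        return response.isAcknowledged();
    }
}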
@@ -544,7 +544,7 @@ public class IndexStatsIT extends ESIntegTestCase {
     assertThat(stats.getTotal().getSearch(), nullValue());

     for (int i = 0; i < 20; i++) {
-        client().prepareIndex("test_index", "doc", Integer.toString(i)).setSource("field", "value").execute().actionGet();
+        client().prepareIndex("test_index", "_doc", Integer.toString(i)).setSource("field", "value").execute().actionGet();
         client().admin().indices().prepareFlush().execute().actionGet();
     }
     client().admin().indices().prepareForceMerge().setMaxNumSegments(1).execute().actionGet();

@@ -564,7 +564,7 @@ public class IndexStatsIT extends ESIntegTestCase {
     NumShards test1 = getNumShards("test_index");

     for (int i = 0; i < 100; i++) {
-        index("test_index", "doc", Integer.toString(i), "field", "value");
+        index("test_index", "_doc", Integer.toString(i), "field", "value");
     }

     IndicesStatsResponse stats = client().admin().indices().prepareStats().setSegments(true).get();

@@ -588,8 +588,8 @@ public class IndexStatsIT extends ESIntegTestCase {

     ensureGreen();

-    client().prepareIndex("test_index", "doc", Integer.toString(1)).setSource("field", "value").execute().actionGet();
-    client().prepareIndex("test_index", "doc", Integer.toString(2)).setSource("field", "value").execute().actionGet();
+    client().prepareIndex("test_index", "_doc", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+    client().prepareIndex("test_index", "_doc", Integer.toString(2)).setSource("field", "value").execute().actionGet();
     client().prepareIndex("test_index_2", "type", Integer.toString(1)).setSource("field", "value").execute().actionGet();

     client().admin().indices().prepareRefresh().execute().actionGet();

@@ -710,9 +710,9 @@ public class IndexStatsIT extends ESIntegTestCase {

     ensureGreen();

-    client().prepareIndex("test1", "doc", Integer.toString(1)).setSource("field", "value").execute().actionGet();
-    client().prepareIndex("test1", "doc", Integer.toString(2)).setSource("field", "value").execute().actionGet();
-    client().prepareIndex("test2", "doc", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+    client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("field", "value").execute().actionGet();
+    client().prepareIndex("test1", "_doc", Integer.toString(2)).setSource("field", "value").execute().actionGet();
+    client().prepareIndex("test2", "_doc", Integer.toString(1)).setSource("field", "value").execute().actionGet();
     refresh();

     int numShards1 = getNumShards("test1").totalNumShards;

@@ -746,13 +746,13 @@ public class IndexStatsIT extends ESIntegTestCase {
     public void testFieldDataFieldsParam() throws Exception {
         assertAcked(client().admin().indices().prepareCreate("test1")
             .setSettings(Settings.builder().put("index.version.created", Version.V_5_6_0.id))
-            .addMapping("doc", "bar", "type=text,fielddata=true",
+            .addMapping("_doc", "bar", "type=text,fielddata=true",
                 "baz", "type=text,fielddata=true").get());

         ensureGreen();

-        client().prepareIndex("test1", "doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get();
-        client().prepareIndex("test1", "doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get();
+        client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get();
+        client().prepareIndex("test1", "_doc", Integer.toString(2)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get();
         refresh();

         client().prepareSearch("_all").addSort("bar", SortOrder.ASC).addSort("baz", SortOrder.ASC).execute().actionGet();

@@ -794,11 +794,11 @@ public class IndexStatsIT extends ESIntegTestCase {
     public void testCompletionFieldsParam() throws Exception {
         assertAcked(prepareCreate("test1")
             .addMapping(
-                "doc",
+                "_doc",
                 "{ \"properties\": { \"bar\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}},\"baz\": { \"type\": \"text\", \"fields\": { \"completion\": { \"type\": \"completion\" }}}}}", XContentType.JSON));
         ensureGreen();

-        client().prepareIndex("test1", "doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get();
+        client().prepareIndex("test1", "_doc", Integer.toString(1)).setSource("{\"bar\":\"bar\",\"baz\":\"baz\"}", XContentType.JSON).get();
         refresh();

         IndicesStatsRequestBuilder builder = client().admin().indices().prepareStats();
@@ -450,15 +450,15 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {


     assertAcked(prepareCreate("test_index")
-        .addMapping("doc"));
+        .addMapping("_doc"));
     ensureGreen();

     GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get();
     assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
     assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(1));

-    client().prepareIndex("test_index", "doc", "1").setSource("field", "value1").get();
-    client().prepareIndex("test_index", "doc", "2").setSource("field", "value2").get();
+    client().prepareIndex("test_index", "_doc", "1").setSource("field", "value1").get();
+    client().prepareIndex("test_index", "_doc", "2").setSource("field", "value2").get();
     refresh();

     SearchResponse searchResponse = client().prepareSearch("test_index").get();

@@ -486,15 +486,15 @@ public class SimpleIndexTemplateIT extends ESIntegTestCase {
     " }\n").get();

     assertAcked(prepareCreate("test_index")
-        .addMapping("doc"));
+        .addMapping("_doc"));
     ensureGreen();

     GetAliasesResponse getAliasesResponse = client().admin().indices().prepareGetAliases().setIndices("test_index").get();
     assertThat(getAliasesResponse.getAliases().size(), equalTo(1));
     assertThat(getAliasesResponse.getAliases().get("test_index").size(), equalTo(3));

-    client().prepareIndex("test_index", "doc", "1").setSource("field", "value1").get();
-    client().prepareIndex("test_index", "doc", "2").setSource("field", "value2").get();
+    client().prepareIndex("test_index", "_doc", "1").setSource("field", "value1").get();
+    client().prepareIndex("test_index", "_doc", "2").setSource("field", "value2").get();
     refresh();

     SearchResponse searchResponse = client().prepareSearch("test_index").get();
@@ -82,7 +82,7 @@ import static org.hamcrest.Matchers.is;
 public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {

     static final String INDEX_NAME = "testidx";
-    static final String DOC_TYPE = "doc";
+    static final String DOC_TYPE = "_doc";
     static final String TEXT_FIELD = "text";
     static final String CLASS_FIELD = "class";


@@ -341,7 +341,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
     public void testDeletesIssue7951() throws Exception {
         String settings = "{\"index.number_of_shards\": 1, \"index.number_of_replicas\": 0}";
         assertAcked(prepareCreate(INDEX_NAME).setSettings(settings, XContentType.JSON)
-            .addMapping("doc", "text", "type=keyword", CLASS_FIELD, "type=keyword"));
+            .addMapping("_doc", "text", "type=keyword", CLASS_FIELD, "type=keyword"));
         String[] cat1v1 = {"constant", "one"};
         String[] cat1v2 = {"constant", "uno"};
         String[] cat2v1 = {"constant", "two"};

@@ -545,7 +545,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
     private void indexEqualTestData() throws ExecutionException, InterruptedException {
         assertAcked(prepareCreate("test")
             .setSettings(Settings.builder().put(SETTING_NUMBER_OF_SHARDS, 1).put(SETTING_NUMBER_OF_REPLICAS, 0))
-            .addMapping("doc", "text", "type=text,fielddata=true", "class", "type=keyword"));
+            .addMapping("_doc", "text", "type=text,fielddata=true", "class", "type=keyword"));
         createIndex("idx_unmapped");

         ensureGreen();

@@ -571,7 +571,7 @@ public class SignificantTermsSignificanceScoreIT extends ESIntegTestCase {
     List<IndexRequestBuilder> indexRequestBuilders = new ArrayList<>();
     for (int i = 0; i < data.length; i++) {
         String[] parts = data[i].split("\t");
-        indexRequestBuilders.add(client().prepareIndex("test", "doc", "" + i)
+        indexRequestBuilders.add(client().prepareIndex("test", "_doc", "" + i)
             .setSource("class", parts[0], "text", parts[1]));
     }
     indexRandom(true, false, indexRequestBuilders);
@@ -71,9 +71,9 @@ public class QueryStringIT extends ESIntegTestCase {

     public void testBasicAllQuery() throws Exception {
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar baz"));
-        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f2", "Bar"));
-        reqs.add(client().prepareIndex("test", "doc", "3").setSource("f3", "foo bar baz"));
+        reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo bar baz"));
+        reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f2", "Bar"));
+        reqs.add(client().prepareIndex("test", "_doc", "3").setSource("f3", "foo bar baz"));
         indexRandom(true, false, reqs);

         SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();

@@ -91,8 +91,8 @@ public class QueryStringIT extends ESIntegTestCase {

     public void testWithDate() throws Exception {
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo", "f_date", "2015/09/02"));
-        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar", "f_date", "2015/09/01"));
+        reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo", "f_date", "2015/09/02"));
+        reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "bar", "f_date", "2015/09/01"));
         indexRandom(true, false, reqs);

         SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo bar")).get();

@@ -114,11 +114,11 @@ public class QueryStringIT extends ESIntegTestCase {

     public void testWithLotsOfTypes() throws Exception {
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo",
+        reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo",
             "f_date", "2015/09/02",
             "f_float", "1.7",
             "f_ip", "127.0.0.1"));
-        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar",
+        reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "bar",
             "f_date", "2015/09/01",
             "f_float", "1.8",
             "f_ip", "127.0.0.2"));

@@ -144,7 +144,7 @@ public class QueryStringIT extends ESIntegTestCase {
     public void testDocWithAllTypes() throws Exception {
         List<IndexRequestBuilder> reqs = new ArrayList<>();
         String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json");
-        reqs.add(client().prepareIndex("test", "doc", "1").setSource(docBody, XContentType.JSON));
+        reqs.add(client().prepareIndex("test", "_doc", "1").setSource(docBody, XContentType.JSON));
         indexRandom(true, false, reqs);

         SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();

@@ -181,9 +181,9 @@ public class QueryStringIT extends ESIntegTestCase {

     public void testKeywordWithWhitespace() throws Exception {
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f2", "Foo Bar"));
-        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar"));
-        reqs.add(client().prepareIndex("test", "doc", "3").setSource("f1", "foo bar"));
+        reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f2", "Foo Bar"));
+        reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "bar"));
+        reqs.add(client().prepareIndex("test", "_doc", "3").setSource("f1", "foo bar"));
         indexRandom(true, false, reqs);

         SearchResponse resp = client().prepareSearch("test").setQuery(queryStringQuery("foo")).get();

@@ -209,7 +209,7 @@ public class QueryStringIT extends ESIntegTestCase {
     ensureGreen("test_1");

     List<IndexRequestBuilder> reqs = new ArrayList<>();
-    reqs.add(client().prepareIndex("test_1", "doc", "1").setSource("f1", "foo", "f2", "eggplant"));
+    reqs.add(client().prepareIndex("test_1", "_doc", "1").setSource("f1", "foo", "f2", "eggplant"));
     indexRandom(true, false, reqs);

     SearchResponse resp = client().prepareSearch("test_1").setQuery(

@@ -225,8 +225,8 @@ public class QueryStringIT extends ESIntegTestCase {

     public void testPhraseQueryOnFieldWithNoPositions() throws Exception {
         List<IndexRequestBuilder> reqs = new ArrayList<>();
-        reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar", "f4", "eggplant parmesan"));
-        reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "foo bar", "f4", "chicken parmesan"));
+        reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo bar", "f4", "eggplant parmesan"));
+        reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "foo bar", "f4", "chicken parmesan"));
         indexRandom(true, false, reqs);

         SearchResponse resp = client().prepareSearch("test")
@@ -1194,12 +1194,12 @@ public class SearchQueryIT extends ESIntegTestCase {
     public void testBasicQueryById() throws Exception {
         assertAcked(prepareCreate("test"));

-        client().prepareIndex("test", "doc", "1").setSource("field1", "value1").get();
-        client().prepareIndex("test", "doc", "2").setSource("field1", "value2").get();
-        client().prepareIndex("test", "doc", "3").setSource("field1", "value3").get();
+        client().prepareIndex("test", "_doc", "1").setSource("field1", "value1").get();
+        client().prepareIndex("test", "_doc", "2").setSource("field1", "value2").get();
+        client().prepareIndex("test", "_doc", "3").setSource("field1", "value3").get();
         refresh();

-        SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("doc").addIds("1", "2")).get();
+        SearchResponse searchResponse = client().prepareSearch().setQuery(idsQuery("_doc").addIds("1", "2")).get();
         assertHitCount(searchResponse, 2L);
         assertThat(searchResponse.getHits().getHits().length, equalTo(2));


@@ -1215,7 +1215,7 @@ public class SearchQueryIT extends ESIntegTestCase {
     assertHitCount(searchResponse, 1L);
     assertThat(searchResponse.getHits().getHits().length, equalTo(1));

-    searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "doc").addIds("1", "2", "3", "4")).get();
+    searchResponse = client().prepareSearch().setQuery(idsQuery("type1", "type2", "_doc").addIds("1", "2", "3", "4")).get();
     assertHitCount(searchResponse, 3L);
     assertThat(searchResponse.getHits().getHits().length, equalTo(3));
     }

@@ -1489,9 +1489,9 @@ public class SearchQueryIT extends ESIntegTestCase {

     public void testSimpleDFSQuery() throws IOException {
         assertAcked(prepareCreate("test")
-            .addMapping("doc", jsonBuilder()
+            .addMapping("_doc", jsonBuilder()
                 .startObject()
-                    .startObject("doc")
+                    .startObject("_doc")
                         .startObject("_routing")
                             .field("required", true)
                         .endObject()

@@ -1513,13 +1513,13 @@ public class SearchQueryIT extends ESIntegTestCase {
     );


-    client().prepareIndex("test", "doc", "1").setRouting("Y").setSource("online", false, "bs", "Y", "ts",
+    client().prepareIndex("test", "_doc", "1").setRouting("Y").setSource("online", false, "bs", "Y", "ts",
         System.currentTimeMillis() - 100, "type", "s").get();
-    client().prepareIndex("test", "doc", "2").setRouting("X").setSource("online", true, "bs", "X", "ts",
+    client().prepareIndex("test", "_doc", "2").setRouting("X").setSource("online", true, "bs", "X", "ts",
         System.currentTimeMillis() - 10000000, "type", "s").get();
-    client().prepareIndex("test", "doc", "3").setRouting(randomAlphaOfLength(2))
+    client().prepareIndex("test", "_doc", "3").setRouting(randomAlphaOfLength(2))
         .setSource("online", false, "ts", System.currentTimeMillis() - 100, "type", "bs").get();
-    client().prepareIndex("test", "doc", "4").setRouting(randomAlphaOfLength(2))
+    client().prepareIndex("test", "_doc", "4").setRouting(randomAlphaOfLength(2))
         .setSource("online", true, "ts", System.currentTimeMillis() - 123123, "type", "bs").get();
     refresh();


@@ -1666,8 +1666,8 @@ public class SearchQueryIT extends ESIntegTestCase {
     public void testQueryStringWithSlopAndFields() {
         assertAcked(prepareCreate("test"));

-        client().prepareIndex("test", "doc", "1").setSource("desc", "one two three", "type", "customer").get();
-        client().prepareIndex("test", "doc", "2").setSource("desc", "one two three", "type", "product").get();
+        client().prepareIndex("test", "_doc", "1").setSource("desc", "one two three", "type", "customer").get();
+        client().prepareIndex("test", "_doc", "2").setSource("desc", "one two three", "type", "product").get();
         refresh();
         {
             SearchResponse searchResponse = client().prepareSearch("test").setQuery(QueryBuilders.queryStringQuery("\"one two\"").defaultField("desc")).get();
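Search-side changes follow suit: ids queries that name a type now name `_doc`. A minimal sketch, assuming documents indexed as in the tests above (the wrapper class is illustrative):

import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;
import org.elasticsearch.index.query.QueryBuilders;

class IdsQuerySketch {
    // Fetch documents "1" and "2" of the single "_doc" type from index "test".
    static SearchResponse byIds(Client client) {
        return client.prepareSearch("test")
            .setQuery(QueryBuilders.idsQuery("_doc").addIds("1", "2"))
            .get();
    }
}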
@@ -292,8 +292,8 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
     // Issue #7967
     public void testLenientFlagBeingTooLenient() throws Exception {
         indexRandom(true,
-            client().prepareIndex("test", "doc", "1").setSource("num", 1, "body", "foo bar baz"),
-            client().prepareIndex("test", "doc", "2").setSource("num", 2, "body", "eggplant spaghetti lasagna"));
+            client().prepareIndex("test", "_doc", "1").setSource("num", 1, "body", "foo bar baz"),
+            client().prepareIndex("test", "_doc", "2").setSource("num", 2, "body", "eggplant spaghetti lasagna"));

         BoolQueryBuilder q = boolQuery().should(simpleQueryStringQuery("bar").field("num").field("body").lenient(true));
         SearchResponse resp = client().prepareSearch("test").setQuery(q).get();

@@ -386,9 +386,9 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
     ensureGreen("test");

     List<IndexRequestBuilder> reqs = new ArrayList<>();
-    reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo bar baz"));
-    reqs.add(client().prepareIndex("test", "doc", "2").setSource("f2", "Bar"));
-    reqs.add(client().prepareIndex("test", "doc", "3").setSource("f3", "foo bar baz"));
+    reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo bar baz"));
+    reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f2", "Bar"));
+    reqs.add(client().prepareIndex("test", "_doc", "3").setSource("f3", "foo bar baz"));
     indexRandom(true, false, reqs);

     SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get();

@@ -410,8 +410,8 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
     ensureGreen("test");

     List<IndexRequestBuilder> reqs = new ArrayList<>();
-    reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo", "f_date", "2015/09/02"));
-    reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar", "f_date", "2015/09/01"));
+    reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo", "f_date", "2015/09/02"));
+    reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "bar", "f_date", "2015/09/01"));
     indexRandom(true, false, reqs);

     SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo bar")).get();

@@ -437,11 +437,11 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
     ensureGreen("test");

     List<IndexRequestBuilder> reqs = new ArrayList<>();
-    reqs.add(client().prepareIndex("test", "doc", "1").setSource("f1", "foo",
+    reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f1", "foo",
         "f_date", "2015/09/02",
         "f_float", "1.7",
         "f_ip", "127.0.0.1"));
-    reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar",
+    reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "bar",
         "f_date", "2015/09/01",
         "f_float", "1.8",
         "f_ip", "127.0.0.2"));

@@ -471,7 +471,7 @@ public class SimpleQueryStringIT extends ESIntegTestCase {

     List<IndexRequestBuilder> reqs = new ArrayList<>();
     String docBody = copyToStringFromClasspath("/org/elasticsearch/search/query/all-example-document.json");
-    reqs.add(client().prepareIndex("test", "doc", "1").setSource(docBody, XContentType.JSON));
+    reqs.add(client().prepareIndex("test", "_doc", "1").setSource(docBody, XContentType.JSON));
     indexRandom(true, false, reqs);

     SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get();

@@ -516,9 +516,9 @@ public class SimpleQueryStringIT extends ESIntegTestCase {
     ensureGreen("test");

     List<IndexRequestBuilder> reqs = new ArrayList<>();
-    reqs.add(client().prepareIndex("test", "doc", "1").setSource("f2", "Foo Bar"));
-    reqs.add(client().prepareIndex("test", "doc", "2").setSource("f1", "bar"));
-    reqs.add(client().prepareIndex("test", "doc", "3").setSource("f1", "foo bar"));
+    reqs.add(client().prepareIndex("test", "_doc", "1").setSource("f2", "Foo Bar"));
+    reqs.add(client().prepareIndex("test", "_doc", "2").setSource("f1", "bar"));
+    reqs.add(client().prepareIndex("test", "_doc", "3").setSource("f1", "foo bar"));
     indexRandom(true, false, reqs);

     SearchResponse resp = client().prepareSearch("test").setQuery(simpleQueryStringQuery("foo")).get();
@@ -66,7 +66,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
     final String index = "test-idx1";
     assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0)));
     for (int i = 0; i < 10; i++) {
-        index(index, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(index, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     final String snapshot1 = "test-snap1";

@@ -74,7 +74,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
     final String index2 = "test-idx2";
     assertAcked(prepareCreate(index2, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0)));
     for (int i = 0; i < 10; i++) {
-        index(index2, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(index2, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     final String snapshot2 = "test-snap2";

@@ -120,7 +120,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
     final String index = "test-idx";
     assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0)));
     for (int i = 0; i < 10; i++) {
-        index(index, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(index, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     final String snapshot1 = "test-snap1";

@@ -166,7 +166,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
     final String index = "test-idx";
     assertAcked(prepareCreate(index, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0)));
     for (int i = 0; i < 10; i++) {
-        index(index, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(index, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     final String snapshot1 = "test-snap1";

@@ -174,7 +174,7 @@ public class MinThreadsSnapshotRestoreIT extends AbstractSnapshotIntegTestCase {
     final String index2 = "test-idx2";
     assertAcked(prepareCreate(index2, 1, Settings.builder().put("number_of_shards", 1).put("number_of_replicas", 0)));
     for (int i = 0; i < 10; i++) {
-        index(index2, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(index2, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     final String snapshot2 = "test-snap2";
@@ -173,9 +173,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
-        index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
-        index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+        index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx-2", "_doc", Integer.toString(i), "foo", "baz" + i);
+        index("test-idx-3", "_doc", Integer.toString(i), "foo", "baz" + i);
     }
     refresh();
     assertHitCount(client.prepareSearch("test-idx-1").setSize(0).get(), 100L);

@@ -225,13 +225,13 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> delete some data");
     for (int i = 0; i < 50; i++) {
-        client.prepareDelete("test-idx-1", "doc", Integer.toString(i)).get();
+        client.prepareDelete("test-idx-1", "_doc", Integer.toString(i)).get();
     }
     for (int i = 50; i < 100; i++) {
-        client.prepareDelete("test-idx-2", "doc", Integer.toString(i)).get();
+        client.prepareDelete("test-idx-2", "_doc", Integer.toString(i)).get();
     }
     for (int i = 0; i < 100; i += 2) {
-        client.prepareDelete("test-idx-3", "doc", Integer.toString(i)).get();
+        client.prepareDelete("test-idx-3", "_doc", Integer.toString(i)).get();
     }
     assertAllSuccessful(refresh());
     assertHitCount(client.prepareSearch("test-idx-1").setSize(0).get(), 50L);

@@ -553,9 +553,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     assertThat(client.admin().indices()
         .preparePutTemplate("test-template")
         .setPatterns(Collections.singletonList("te*"))
-        .addMapping("doc", XContentFactory.jsonBuilder()
+        .addMapping("_doc", XContentFactory.jsonBuilder()
             .startObject()
-                .startObject("doc")
+                .startObject("_doc")
                     .startObject("properties")
                         .startObject("field1")
                             .field("type", "text")

@@ -665,7 +665,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -723,7 +723,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -772,7 +772,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -836,7 +836,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -895,7 +895,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1003,7 +1003,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     // index some documents
     final int nbDocs = scaledRandomIntBetween(10, 100);
     for (int i = 0; i < nbDocs; i++) {
-        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     flushAndRefresh(indexName);
     assertThat(client().prepareSearch(indexName).setSize(0).get().getHits().getTotalHits(), equalTo((long) nbDocs));

@@ -1095,7 +1095,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1184,7 +1184,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     logger.info("--> creating {} snapshots ", numberOfSnapshots);
     for (int i = 0; i < numberOfSnapshots; i++) {
         for (int j = 0; j < 10; j++) {
-            index("test-idx", "doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j);
+            index("test-idx", "_doc", Integer.toString(i * 10 + j), "foo", "bar" + i * 10 + j);
         }
         refresh();
         logger.info("--> snapshot {}", i);

@@ -1237,8 +1237,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     createIndex("test-idx-1", "test-idx-2");
     logger.info("--> indexing some data");
     indexRandom(true,
-        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
-        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+        client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"),
+        client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"));

     logger.info("--> creating snapshot");
     CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();

@@ -1274,8 +1274,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     createIndex("test-idx-1", "test-idx-2");
     logger.info("--> indexing some data");
     indexRandom(true,
-        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
-        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+        client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"),
+        client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"));

     logger.info("--> creating snapshot");
     CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();

@@ -1307,8 +1307,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     createIndex("test-idx-1", "test-idx-2");
     logger.info("--> indexing some data");
     indexRandom(true,
-        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
-        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+        client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"),
+        client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"));

     logger.info("--> creating snapshot");
     CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();

@@ -1341,8 +1341,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     createIndex("test-idx-1", "test-idx-2");
     logger.info("--> indexing some data");
     indexRandom(true,
-        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
-        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"));
+        client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"),
+        client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"));

     logger.info("--> creating snapshot");
     client().admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1")

@@ -1435,8 +1435,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
-        index("test-idx-2", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx-2", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1551,7 +1551,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1614,7 +1614,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1695,7 +1695,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -1754,7 +1754,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1810,7 +1810,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1923,7 +1923,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -1963,7 +1963,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     final int numdocs = randomIntBetween(10, 100);
     IndexRequestBuilder[] builders = new IndexRequestBuilder[numdocs];
     for (int i = 0; i < builders.length; i++) {
-        builders[i] = client().prepareIndex("test", "doc", Integer.toString(i)).setSource("foo", "bar" + i);
+        builders[i] = client().prepareIndex("test", "_doc", Integer.toString(i)).setSource("foo", "bar" + i);
     }
     indexRandom(true, builders);
     flushAndRefresh();

@@ -1993,7 +1993,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
         }
     }

-    client().prepareDelete("test", "doc", "1").get();
+    client().prepareDelete("test", "_doc", "1").get();
     CreateSnapshotResponse createSnapshotResponseThird = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-2").setWaitForCompletion(true).setIndices("test").get();
     assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), greaterThan(0));
     assertThat(createSnapshotResponseThird.getSnapshotInfo().successfulShards(), equalTo(createSnapshotResponseThird.getSnapshotInfo().totalShards()));

@@ -2241,9 +2241,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
-        index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
-        index("test-idx-3", "doc", Integer.toString(i), "foo", "baz" + i);
+        index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx-2", "_doc", Integer.toString(i), "foo", "baz" + i);
+        index("test-idx-3", "_doc", Integer.toString(i), "foo", "baz" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -2329,8 +2329,8 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index("test-idx-1", "doc", Integer.toString(i), "foo", "bar" + i);
-        index("test-idx-2", "doc", Integer.toString(i), "foo", "baz" + i);
+        index("test-idx-1", "_doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx-2", "_doc", Integer.toString(i), "foo", "baz" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx-1").setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -2392,7 +2392,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < 100; i++) {
-        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch(indexName).setSize(0).get().getHits().getTotalHits(), equalTo(100L));

@@ -2555,9 +2555,9 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     createIndex("test-idx-1", "test-idx-2", "test-idx-3");
     logger.info("--> indexing some data");
     indexRandom(true,
-        client().prepareIndex("test-idx-1", "doc").setSource("foo", "bar"),
-        client().prepareIndex("test-idx-2", "doc").setSource("foo", "bar"),
-        client().prepareIndex("test-idx-3", "doc").setSource("foo", "bar"));
+        client().prepareIndex("test-idx-1", "_doc").setSource("foo", "bar"),
+        client().prepareIndex("test-idx-2", "_doc").setSource("foo", "bar"),
+        client().prepareIndex("test-idx-3", "_doc").setSource("foo", "bar"));

     logger.info("--> creating 2 snapshots");
     CreateSnapshotResponse createSnapshotResponse = client.admin().cluster().prepareCreateSnapshot("test-repo", "test-snap-1").setWaitForCompletion(true).setIndices("test-idx-*").get();

@@ -2608,7 +2608,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     createIndex(indexName);
     ensureGreen();
     for (int i = 0; i < 10; i++) {
-        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -2625,7 +2625,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> index more documents");
     for (int i = 10; i < 20; i++) {
-        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -2690,7 +2690,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     assertAcked(prepareCreate(indexName, 1, Settings.builder().put("number_of_replicas", 0)));
     ensureGreen();
     for (int i = 0; i < 10; i++) {
-        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -2731,7 +2731,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     logger.info("--> take another snapshot to be in-progress");
     // add documents so there are data files to block on
    for (int i = 10; i < 20; i++) {
-        index(indexName, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(indexName, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -2821,7 +2821,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas

     logger.info("--> indexing some data");
     for (int i = 0; i < numDocs; i++) {
-        index(index, "doc", Integer.toString(i), "foo", "bar" + i);
+        index(index, "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -2884,7 +2884,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     ensureGreen();
     final int numDocs = randomIntBetween(1, 5);
     for (int i = 0; i < numDocs; i++) {
-        index("test-idx", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();
     assertThat(client.prepareSearch("test-idx").setSize(0).get().getHits().getTotalHits(), equalTo((long) numDocs));

@@ -2940,7 +2940,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     ensureGreen();
     final int numDocs = randomIntBetween(1, 5);
     for (int i = 0; i < numDocs; i++) {
-        index("test-idx-good", "doc", Integer.toString(i), "foo", "bar" + i);
+        index("test-idx-good", "_doc", Integer.toString(i), "foo", "bar" + i);
     }
     refresh();


@@ -3019,7 +3019,7 @@ public class SharedClusterSnapshotRestoreIT extends AbstractSnapshotIntegTestCas
     if (randomBoolean()) {
         final int numDocs = randomIntBetween(1, 5);
         for (int k = 0; k < numDocs; k++) {
-            index("test-idx-" + j, "doc", Integer.toString(k), "foo", "bar" + k);
+            index("test-idx-" + j, "_doc", Integer.toString(k), "foo", "bar" + k);
         }
         refresh();
     }
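The snapshot/restore suites rename both the indexing and the delete side of their fixtures. A compact sketch of both call shapes under the new type name (the wrapper class, index name, id, and field are illustrative):

import org.elasticsearch.client.Client;

class SnapshotDataSketch {
    // Index then delete a single "_doc" document, as the suites above now do.
    static void indexAndDelete(Client client) {
        client.prepareIndex("test-idx", "_doc", "1").setSource("foo", "bar").get();
        client.prepareDelete("test-idx", "_doc", "1").get();
    }
}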
@@ -44,7 +44,7 @@ import static org.elasticsearch.search.aggregations.AggregationBuilders.terms;

 public class SharedSignificantTermsTestMethods {
     public static final String INDEX_NAME = "testidx";
-    public static final String DOC_TYPE = "doc";
+    public static final String DOC_TYPE = "_doc";
     public static final String TEXT_FIELD = "text";
     public static final String CLASS_FIELD = "class";


@@ -82,7 +82,7 @@ public class SharedSignificantTermsTestMethods {
         textMappings += ",fielddata=true";
     }
     assertAcked(testCase.prepareCreate(INDEX_NAME).setSettings(settings, XContentType.JSON)
-        .addMapping("doc", "text", textMappings, CLASS_FIELD, "type=keyword"));
+        .addMapping("_doc", "text", textMappings, CLASS_FIELD, "type=keyword"));
     String[] gb = {"0", "1"};
     List<IndexRequestBuilder> indexRequestBuilderList = new ArrayList<>();
     indexRequestBuilderList.add(client().prepareIndex(INDEX_NAME, DOC_TYPE, "1")
@@ -6,7 +6,7 @@
     }
   },
   "mappings": {
-    "doc": {
+    "_doc": {
       "properties": {
         "f1": {"type": "text"},
         "f2": {"type": "keyword"},
@@ -70,7 +70,7 @@ Closure setupTwitter = { String name, int count ->
           number_of_shards: 1
           number_of_replicas: 1
         mappings:
-          tweet:
+          _doc:
             properties:
               user:
                 type: keyword

@@ -82,7 +82,7 @@ Closure setupTwitter = { String name, int count ->
   - do:
       bulk:
         index: twitter
-        type: tweet
+        type: _doc
        refresh: true
        body: |'''
 for (int i = 0; i < count; i++) {

@@ -134,7 +134,7 @@ buildRestTests.setups['ledger'] = '''
           number_of_shards: 2
           number_of_replicas: 1
         mappings:
-          sale:
+          _doc:
             properties:
               type:
                 type: keyword

@@ -143,7 +143,7 @@ buildRestTests.setups['ledger'] = '''
   - do:
       bulk:
        index: ledger
-       type: sale
+       type: _doc
        refresh: true
        body: |
          {"index":{}}

@@ -167,14 +167,14 @@ buildRestTests.setups['sales'] = '''
           number_of_shards: 2
           number_of_replicas: 1
         mappings:
-          sale:
+          _doc:
             properties:
               type:
                 type: keyword
   - do:
       bulk:
        index: sales
-       type: sale
+       type: _doc
        refresh: true
        body: |
          {"index":{}}

@@ -204,7 +204,7 @@ buildRestTests.setups['bank'] = '''
   - do:
       bulk:
        index: bank
-       type: account
+       type: _doc
        refresh: true
        body: |
 #bank_data#

@@ -231,7 +231,7 @@ buildRestTests.setups['range_index'] = '''
           number_of_shards: 2
           number_of_replicas: 1
         mappings:
-          my_type:
+          _doc:
             properties:
               expected_attendees:
                 type: integer_range

@@ -241,7 +241,7 @@ buildRestTests.setups['range_index'] = '''
   - do:
       bulk:
        index: range_index
-       type: my_type
+       type: _doc
        refresh: true
        body: |
          {"index":{"_id": 1}}

@@ -271,7 +271,7 @@ buildRestTests.setups['stackoverflow'] = '''
           number_of_shards: 1
           number_of_replicas: 1
         mappings:
-          question:
+          _doc:
             properties:
               author:
                 type: keyword

@@ -280,7 +280,7 @@ buildRestTests.setups['stackoverflow'] = '''
   - do:
       bulk:
        index: stackoverflow
-       type: question
+       type: _doc
        refresh: true
        body: |'''

@@ -326,7 +326,7 @@ buildRestTests.setups['news'] = '''
           number_of_shards: 1
           number_of_replicas: 1
         mappings:
-          question:
+          _doc:
             properties:
               source:
                 type: keyword

@@ -335,7 +335,7 @@ buildRestTests.setups['news'] = '''
   - do:
       bulk:
        index: news
-       type: article
+       type: _doc
        refresh: true
        body: |'''

@@ -379,14 +379,14 @@ buildRestTests.setups['exams'] = '''
           number_of_shards: 1
           number_of_replicas: 1
         mappings:
-          exam:
+          _doc:
             properties:
               grade:
                 type: byte
   - do:
       bulk:
        index: exams
-       type: exam
+       type: _doc
        refresh: true
        body: |
          {"index":{}}

@@ -444,7 +444,7 @@ buildRestTests.setups['analyze_sample'] = '''
               type: custom
               filter: [lowercase]
         mappings:
-          tweet:
+          _doc:
             properties:
               obj1.field1:
                 type: text'''

@@ -459,14 +459,14 @@ buildRestTests.setups['latency'] = '''
           number_of_shards: 1
           number_of_replicas: 1
         mappings:
-          data:
+          _doc:
             properties:
               load_time:
                 type: long
   - do:
       bulk:
        index: latency
-       type: data
+       type: _doc
        refresh: true
        body: |'''

@@ -491,14 +491,14 @@ buildRestTests.setups['iprange'] = '''
           number_of_shards: 1
           number_of_replicas: 1
         mappings:
-          data:
+          _doc:
             properties:
               ip:
                 type: ip
   - do:
       bulk:
        index: ip_addresses
-       type: data
+       type: _doc
        refresh: true
        body: |'''
@@ -33,7 +33,7 @@ Example:

[source,js]
--------------------------------------------------
-PUT /emails/message/_bulk?refresh
+PUT /emails/_doc/_bulk?refresh
{ "index" : { "_id" : 1 } }
{ "accounts" : ["hillary", "sidney"]}
{ "index" : { "_id" : 2 } }
@@ -41,7 +41,7 @@ PUT /emails/message/_bulk?refresh
{ "index" : { "_id" : 3 } }
{ "accounts" : ["vladimir", "donald"]}

-GET emails/message/_search
+GET emails/_search
{
"size": 0,
"aggs" : {

@@ -14,7 +14,7 @@ For example, let's say we have an index of questions and answers. The answer typ
PUT child_example
{
"mappings": {
-"doc": {
+"_doc": {
"properties": {
"join": {
"type": "join",
@@ -37,7 +37,7 @@ An example of a question document:

[source,js]
--------------------------------------------------
-PUT child_example/doc/1
+PUT child_example/_doc/1
{
"join": {
"name": "question"
@@ -58,7 +58,7 @@ Examples of `answer` documents:

[source,js]
--------------------------------------------------
-PUT child_example/doc/2?routing=1
+PUT child_example/_doc/2?routing=1
{
"join": {
"name": "answer",
@@ -73,7 +73,7 @@ PUT child_example/doc/2?routing=1
"creation_date": "2009-05-04T13:45:37.030"
}

-PUT child_example/doc/3?routing=1&refresh
+PUT child_example/_doc/3?routing=1&refresh
{
"join": {
"name": "answer",

@@ -125,12 +125,12 @@ Consider the following example:

[source,js]
---------------------------------
-PUT my_index/log/1?refresh
+PUT my_index/_doc/1?refresh
{
"date": "2015-10-01T00:30:00Z"
}

-PUT my_index/log/2?refresh
+PUT my_index/_doc/2?refresh
{
"date": "2015-10-01T01:30:00Z"
}
@@ -247,12 +247,12 @@ to run from 6am to 6am:

[source,js]
-----------------------------
-PUT my_index/log/1?refresh
+PUT my_index/_doc/1?refresh
{
"date": "2015-10-01T05:30:00Z"
}

-PUT my_index/log/2?refresh
+PUT my_index/_doc/2?refresh
{
"date": "2015-10-01T06:30:00Z"
}

@@ -9,7 +9,7 @@ Example:

[source,js]
--------------------------------------------------
-PUT /logs/message/_bulk?refresh
+PUT /logs/_doc/_bulk?refresh
{ "index" : { "_id" : 1 } }
{ "body" : "warning: page could not be rendered" }
{ "index" : { "_id" : 2 } }
@@ -134,7 +134,7 @@ The following snippet shows a response where the `other` bucket is requested to

[source,js]
--------------------------------------------------
-PUT logs/message/4?refresh
+PUT logs/_doc/4?refresh
{
"body": "info: user Bob logged out"
}

@@ -7,7 +7,7 @@ Example:

[source,js]
--------------------------------------------------
-GET /ip_addresses/data/_search
+GET /ip_addresses/_search
{
"size": 10,
"aggs" : {
@@ -55,7 +55,7 @@ IP ranges can also be defined as CIDR masks:

[source,js]
--------------------------------------------------
-GET /ip_addresses/data/_search
+GET /ip_addresses/_search
{
"size": 0,
"aggs" : {
@@ -109,7 +109,7 @@ Setting the `keyed` flag to `true` will associate a unique string key with each

[source,js]
--------------------------------------------------
-GET /ip_addresses/data/_search
+GET /ip_addresses/_search
{
"size": 0,
"aggs": {
@@ -158,7 +158,7 @@ It is also possible to customize the key for each range:

[source,js]
--------------------------------------------------
-GET /ip_addresses/data/_search
+GET /ip_addresses/_search
{
"size": 0,
"aggs": {
@@ -201,4 +201,4 @@ Response:
}
}
--------------------------------------------------
-// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]
+// TESTRESPONSE[s/\.\.\./"took": $body.took,"timed_out": false,"_shards": $body._shards,"hits": $body.hits,/]

@@ -17,13 +17,13 @@ The `matrix_stats` aggregation is a numeric aggregation that computes the follow

[source,js]
--------------------------------------------------
-PUT /statistics/doc/0
+PUT /statistics/_doc/0
{"poverty": 24.0, "income": 50000.0}

-PUT /statistics/doc/1
+PUT /statistics/_doc/1
{"poverty": 13.0, "income": 95687.0}

-PUT /statistics/doc/2
+PUT /statistics/_doc/2
{"poverty": 69.0, "income": 7890.0}

POST /_refresh

@@ -50,7 +50,7 @@ POST /sales/_search?size=0
"aggs" : {
"type_count" : {
"cardinality" : {
-"field" : "type",
+"field" : "_doc",
"precision_threshold": 100 <1>
}
}
@@ -207,7 +207,7 @@ POST /sales/_search?size=0
"script" : {
"id": "my_script",
"params": {
-"type_field": "type",
+"type_field": "_doc",
"promoted_field": "promoted"
}
}

@@ -26,7 +26,7 @@ Let's look at a range of percentiles representing load time:

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -78,7 +78,7 @@ must be a value between 0-100 inclusive):

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -101,7 +101,7 @@ By default the `keyed` flag is set to `true` which associates a unique string ke

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs": {
@@ -170,7 +170,7 @@ a script to convert them on-the-fly:

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -199,7 +199,7 @@ This will interpret the `script` parameter as an `inline` script with the `painl

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -264,7 +264,7 @@ This balance can be controlled using a `compression` parameter:

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -315,7 +315,7 @@ The HDR Histogram can be used by specifying the `method` parameter in the reques

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -348,7 +348,7 @@ had a value.

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {

@@ -24,7 +24,7 @@ Let's look at a range of percentiles representing load time:

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -69,7 +69,7 @@ By default the `keyed` flag is set to `true` associates a unique string key with

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs": {
@@ -120,7 +120,7 @@ a script to convert them on-the-fly:

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -149,7 +149,7 @@ This will interpret the `script` parameter as an `inline` script with the `painl

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {
@@ -185,7 +185,7 @@ The HDR Histogram can be used by specifying the `method` parameter in the reques

[source,js]
--------------------------------------------------
-GET latency/data/_search
+GET latency/_search
{
"size": 0,
"aggs" : {

@@ -151,7 +151,7 @@ Imagine a situation where you index the following documents into an index with 2

[source,js]
--------------------------------------------------
-PUT /transactions/stock/_bulk?refresh
+PUT /transactions/_doc/_bulk?refresh
{"index":{"_id":1}}
{"type": "sale","amount": 80}
{"index":{"_id":2}}

@@ -86,7 +86,7 @@ Possible response:
"hits": [
{
"_index": "sales",
-"_type": "sale",
+"_type": "_doc",
"_id": "AVnNBmauCQpcRyxw6ChK",
"_source": {
"date": "2015/03/01 00:00:00",
@@ -111,7 +111,7 @@ Possible response:
"hits": [
{
"_index": "sales",
-"_type": "sale",
+"_type": "_doc",
"_id": "AVnNBmauCQpcRyxw6ChL",
"_source": {
"date": "2015/03/01 00:00:00",
@@ -136,7 +136,7 @@ Possible response:
"hits": [
{
"_index": "sales",
-"_type": "sale",
+"_type": "_doc",
"_id": "AVnNBmatCQpcRyxw6ChH",
"_source": {
"date": "2015/01/01 00:00:00",
@@ -234,7 +234,7 @@ Let's see how it works with a real sample. Considering the following mapping:
PUT /sales
{
"mappings": {
-"product" : {
+"_doc" : {
"properties" : {
"tags" : { "type" : "keyword" },
"comments" : { <1>
@@ -256,7 +256,7 @@ And some documents:

[source,js]
--------------------------------------------------
-PUT /sales/product/1?refresh
+PUT /sales/_doc/1?refresh
{
"tags": ["car", "auto"],
"comments": [
@@ -324,7 +324,7 @@ Top hits response snippet with a nested hit, which resides in the first slot of
"hits": [
{
"_index": "sales",
-"_type" : "product",
+"_type" : "_doc",
"_id": "1",
"_nested": {
"field": "comments", <1>
@@ -392,4 +392,4 @@ the second slow of the `nested_child_field` field:
}
...
--------------------------------------------------
-// NOTCONSOLE
+// NOTCONSOLE

@@ -17,7 +17,7 @@ setting `size=0`. For example:

[source,js]
--------------------------------------------------
-GET /twitter/tweet/_search
+GET /twitter/_search
{
"size": 0,
"aggregations": {
@@ -44,7 +44,7 @@ Consider this example where we want to associate the color blue with our `terms`

[source,js]
--------------------------------------------------
-GET /twitter/tweet/_search
+GET /twitter/_search
{
"size": 0,
"aggs": {
@@ -96,7 +96,7 @@ Considering the following <<search-aggregations-bucket-datehistogram-aggregation

[source,js]
--------------------------------------------------
-GET /twitter/tweet/_search?typed_keys
+GET /twitter/_search?typed_keys
{
"aggregations": {
"tweets_over_time": {
@@ -138,7 +138,7 @@ In the response, the aggregations names will be changed to respectively `date_hi
"hits" : [
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_id": "0",
"_score": 1.0,
"_source": {

@@ -39,7 +39,7 @@ Each <<text,`text`>> field in a mapping can specify its own
PUT my_index
{
"mappings": {
-"my_type": {
+"_doc": {
"properties": {
"title": {
"type": "text",

@@ -21,7 +21,7 @@ PUT my_index
}
},
"mappings": {
-"my_type": {
+"_doc": {
"properties": {
"my_text": {
"type": "text",

@@ -125,7 +125,7 @@ PUT my_index
}
},
"mappings": {
-"my_type": {
+"_doc": {
"properties": {
"text": {
"type": "text",
@@ -205,7 +205,7 @@ the length of the original text:

[source,js]
----------------------------
-PUT my_index/my_type/1?refresh
+PUT my_index/_doc/1?refresh
{
"text": "The fooBarBaz method"
}
@@ -246,7 +246,7 @@ The output from the above is:
"hits": [
{
"_index": "my_index",
-"_type": "my_type",
+"_type": "_doc",
"_id": "1",
"_score": 0.2876821,
"_source": {

@@ -58,7 +58,7 @@ PUT my_index
}
},
"mappings": {
-"my_type": {
+"_doc": {
"properties": {
"my_text": {
"type": "text",

@@ -250,7 +250,7 @@ PUT my_index
}
},
"mappings": {
-"doc": {
+"_doc": {
"properties": {
"title": {
"type": "text",
@@ -262,7 +262,7 @@ PUT my_index
}
}

-PUT my_index/doc/1
+PUT my_index/_doc/1
{
"title": "Quick Foxes" <1>
}
@@ -305,7 +305,7 @@ GET my_index/_search
"hits": [
{
"_index": "my_index",
-"_type": "doc",
+"_type": "_doc",
"_id": "1",
"_score": 0.5753642,
"_source": {

@@ -57,10 +57,10 @@ newlines. Example:
[source,js]
--------------------------------------------------
$ cat requests
-{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+{ "index" : { "_index" : "test", "_type" : "_doc", "_id" : "1" } }
{ "field1" : "value1" }
$ curl -s -H "Content-Type: application/x-ndjson" -XPOST localhost:9200/_bulk --data-binary "@requests"; echo
-{"took":7, "errors": false, "items":[{"index":{"_index":"test","_type":"type1","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
+{"took":7, "errors": false, "items":[{"index":{"_index":"test","_type":"_doc","_id":"1","_version":1,"result":"created","forced_refresh":false}}]}
--------------------------------------------------
// NOTCONSOLE
// Not converting to console because this shows how curl works
@@ -72,12 +72,12 @@ example of a correct sequence of bulk commands:
[source,js]
--------------------------------------------------
POST _bulk
-{ "index" : { "_index" : "test", "_type" : "type1", "_id" : "1" } }
+{ "index" : { "_index" : "test", "_type" : "_doc", "_id" : "1" } }
{ "field1" : "value1" }
-{ "delete" : { "_index" : "test", "_type" : "type1", "_id" : "2" } }
-{ "create" : { "_index" : "test", "_type" : "type1", "_id" : "3" } }
+{ "delete" : { "_index" : "test", "_type" : "_doc", "_id" : "2" } }
+{ "create" : { "_index" : "test", "_type" : "_doc", "_id" : "3" } }
{ "field1" : "value3" }
-{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "test"} }
+{ "update" : {"_id" : "1", "_type" : "_doc", "_index" : "test"} }
{ "doc" : {"field2" : "value2"} }
--------------------------------------------------
// CONSOLE
@@ -93,7 +93,7 @@ The result of this bulk operation is:
{
"index": {
"_index": "test",
-"_type": "type1",
+"_type": "_doc",
"_id": "1",
"_version": 1,
"result": "created",
@@ -110,7 +110,7 @@ The result of this bulk operation is:
{
"delete": {
"_index": "test",
-"_type": "type1",
+"_type": "_doc",
"_id": "2",
"_version": 1,
"result": "not_found",
@@ -127,7 +127,7 @@ The result of this bulk operation is:
{
"create": {
"_index": "test",
-"_type": "type1",
+"_type": "_doc",
"_id": "3",
"_version": 1,
"result": "created",
@@ -144,7 +144,7 @@ The result of this bulk operation is:
{
"update": {
"_index": "test",
-"_type": "type1",
+"_type": "_doc",
"_id": "1",
"_version": 2,
"result": "updated",
@@ -246,15 +246,15 @@ the options. Example with update actions:
[source,js]
--------------------------------------------------
POST _bulk
-{ "update" : {"_id" : "1", "_type" : "type1", "_index" : "index1", "retry_on_conflict" : 3} }
+{ "update" : {"_id" : "1", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} }
{ "doc" : {"field" : "value"} }
-{ "update" : { "_id" : "0", "_type" : "type1", "_index" : "index1", "retry_on_conflict" : 3} }
+{ "update" : { "_id" : "0", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} }
{ "script" : { "source": "ctx._source.counter += params.param1", "lang" : "painless", "params" : {"param1" : 1}}, "upsert" : {"counter" : 1}}
-{ "update" : {"_id" : "2", "_type" : "type1", "_index" : "index1", "retry_on_conflict" : 3} }
+{ "update" : {"_id" : "2", "_type" : "_doc", "_index" : "index1", "retry_on_conflict" : 3} }
{ "doc" : {"field" : "value"}, "doc_as_upsert" : true }
-{ "update" : {"_id" : "3", "_type" : "type1", "_index" : "index1", "_source" : true} }
+{ "update" : {"_id" : "3", "_type" : "_doc", "_index" : "index1", "_source" : true} }
{ "doc" : {"field" : "value"} }
-{ "update" : {"_id" : "4", "_type" : "type1", "_index" : "index1"} }
+{ "update" : {"_id" : "4", "_type" : "_doc", "_index" : "index1"} }
{ "doc" : {"field" : "value"}, "_source": true}
--------------------------------------------------
// CONSOLE

@@ -76,7 +76,7 @@ will only delete `tweet` documents from the `twitter` index:

[source,js]
--------------------------------------------------
-POST twitter/tweet/_delete_by_query?conflicts=proceed
+POST twitter/_doc/_delete_by_query?conflicts=proceed
{
"query": {
"match_all": {}

@@ -8,7 +8,7 @@ from an index called twitter, under a type called tweet, with id valued

[source,js]
--------------------------------------------------
-DELETE /twitter/tweet/1
+DELETE /twitter/_doc/1
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -24,7 +24,7 @@ The result of the above delete operation is:
"successful" : 2
},
"_index" : "twitter",
-"_type" : "tweet",
+"_type" : "_doc",
"_id" : "1",
"_version" : 2,
"_primary_term": 1,
@@ -59,7 +59,7 @@ Example to delete with routing

[source,js]
--------------------------------------------------
-PUT /twitter/tweet/1?routing=kimchy
+PUT /twitter/_doc/1?routing=kimchy
{
"test": "test"
}
@@ -70,7 +70,7 @@ PUT /twitter/tweet/1?routing=kimchy

[source,js]
--------------------------------------------------
-DELETE /twitter/tweet/1?routing=kimchy
+DELETE /twitter/_doc/1?routing=kimchy
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -136,7 +136,7 @@ to 5 minutes:

[source,js]
--------------------------------------------------
-DELETE /twitter/tweet/1?timeout=5m
+DELETE /twitter/_doc/1?timeout=5m
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]

@@ -7,7 +7,7 @@ twitter, under a type called tweet, with id valued 0:

[source,js]
--------------------------------------------------
-GET twitter/tweet/0
+GET twitter/_doc/0
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -18,7 +18,7 @@ The result of the above get operation is:
--------------------------------------------------
{
"_index" : "twitter",
-"_type" : "tweet",
+"_type" : "_doc",
"_id" : "0",
"_version" : 1,
"found": true,
@@ -42,7 +42,7 @@ The API also allows to check for the existence of a document using

[source,js]
--------------------------------------------------
-HEAD twitter/tweet/0
+HEAD twitter/_doc/0
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -68,7 +68,7 @@ You can turn off `_source` retrieval by using the `_source` parameter:

[source,js]
--------------------------------------------------
-GET twitter/tweet/0?_source=false
+GET twitter/_doc/0?_source=false
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -80,7 +80,7 @@ of fields or wildcard expressions. Example:

[source,js]
--------------------------------------------------
-GET twitter/tweet/0?_source_include=*.id&_source_exclude=entities
+GET twitter/_doc/0?_source_include=*.id&_source_exclude=entities
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -89,7 +89,7 @@ If you only want to specify includes, you can use a shorter notation:

[source,js]
--------------------------------------------------
-GET twitter/tweet/0?_source=*.id,retweeted
+GET twitter/_doc/0?_source=*.id,retweeted
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -108,7 +108,7 @@ Consider for instance the following mapping:
PUT twitter
{
"mappings": {
-"tweet": {
+"_doc": {
"properties": {
"counter": {
"type": "integer",
@@ -129,7 +129,7 @@ Now we can add a document:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/1
+PUT twitter/_doc/1
{
"counter" : 1,
"tags" : ["red"]
@@ -142,7 +142,7 @@ PUT twitter/tweet/1

[source,js]
--------------------------------------------------
-GET twitter/tweet/1?stored_fields=tags,counter
+GET twitter/_doc/1?stored_fields=tags,counter
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -153,7 +153,7 @@ The result of the above get operation is:
--------------------------------------------------
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_id": "1",
"_version": 1,
"found": true,
@@ -174,7 +174,7 @@ It is also possible to retrieve metadata fields like the `_routing` field:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/2?routing=user1
+PUT twitter/_doc/2?routing=user1
{
"counter" : 1,
"tags" : ["white"]
@@ -185,7 +185,7 @@ PUT twitter/tweet/2?routing=user1

[source,js]
--------------------------------------------------
-GET twitter/tweet/2?routing=user1&stored_fields=tags,counter
+GET twitter/_doc/2?routing=user1&stored_fields=tags,counter
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -196,7 +196,7 @@ The result of the above get operation is:
--------------------------------------------------
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_id": "2",
"_version": 1,
"_routing": "user1",
@@ -223,7 +223,7 @@ without any additional content around it. For example:

[source,js]
--------------------------------------------------
-GET twitter/tweet/1/_source
+GET twitter/_doc/1/_source
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -232,7 +232,7 @@ You can also use the same source filtering parameters to control which parts of

[source,js]
--------------------------------------------------
-GET twitter/tweet/1/_source?_source_include=*.id&_source_exclude=entities'
+GET twitter/_doc/1/_source?_source_include=*.id&_source_exclude=entities'
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -242,7 +242,7 @@ An existing document will not have a _source if it is disabled in the <<mapping-

[source,js]
--------------------------------------------------
-HEAD twitter/tweet/1/_source
+HEAD twitter/_doc/1/_source
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -256,7 +256,7 @@ a document, the routing value should also be provided. For example:

[source,js]
--------------------------------------------------
-GET twitter/tweet/2?routing=user1
+GET twitter/_doc/2?routing=user1
--------------------------------------------------
// CONSOLE
// TEST[continued]

@@ -5,11 +5,11 @@ IMPORTANT: See <<removal-of-types>>.

The index API adds or updates a typed JSON document in a specific index,
making it searchable. The following example inserts the JSON document
-into the "twitter" index, under a type called "tweet" with an id of 1:
+into the "twitter" index, under a type called "_doc" with an id of 1:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/1
+PUT twitter/_doc/1
{
"user" : "kimchy",
"post_date" : "2009-11-15T14:12:12",
@@ -29,7 +29,7 @@ The result of the above index operation is:
"successful" : 2
},
"_index" : "twitter",
-"_type" : "tweet",
+"_type" : "_doc",
"_id" : "1",
"_version" : 1,
"_seq_no" : 0,
@@ -96,7 +96,7 @@ meantime. For example:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/1?version=2
+PUT twitter/_doc/1?version=2
{
"message" : "elasticsearch now has versioning support, double cool!"
}
@@ -176,7 +176,7 @@ Here is an example of using the `op_type` parameter:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/1?op_type=create
+PUT twitter/_doc/1?op_type=create
{
"user" : "kimchy",
"post_date" : "2009-11-15T14:12:12",
@@ -189,7 +189,7 @@ Another option to specify `create` is to use the following uri:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/1/_create
+PUT twitter/_doc/1/_create
{
"user" : "kimchy",
"post_date" : "2009-11-15T14:12:12",
@@ -208,7 +208,7 @@ will automatically be set to `create`. Here is an example (note the

[source,js]
--------------------------------------------------
-POST twitter/tweet/
+POST twitter/_doc/
{
"user" : "kimchy",
"post_date" : "2009-11-15T14:12:12",
@@ -228,7 +228,7 @@ The result of the above index operation is:
"successful" : 2
},
"_index" : "twitter",
-"_type" : "tweet",
+"_type" : "_doc",
"_id" : "6a8ca01c-7896-48e9-81cc-9f70661fcb32",
"_version" : 1,
"_seq_no" : 0,
@@ -258,7 +258,7 @@ POST twitter/tweet?routing=kimchy
--------------------------------------------------
// CONSOLE

-In the example above, the "tweet" document is routed to a shard based on
+In the example above, the "_doc" document is routed to a shard based on
the `routing` parameter provided: "kimchy".

When setting up explicit mapping, the `_routing` field can be optionally
@@ -372,7 +372,7 @@ to 5 minutes:

[source,js]
--------------------------------------------------
-PUT twitter/tweet/1?timeout=5m
+PUT twitter/_doc/1?timeout=5m
{
"user" : "kimchy",
"post_date" : "2009-11-15T14:12:12",

@@ -14,12 +14,12 @@ GET /_mget
"docs" : [
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "1"
},
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "2"
}
]
@@ -36,11 +36,11 @@ GET /test/_mget
{
"docs" : [
{
-"_type" : "type",
+"_type" : "_doc",
"_id" : "1"
},
{
-"_type" : "type",
+"_type" : "_doc",
"_id" : "2"
}
]
@@ -78,48 +78,6 @@ GET /test/type/_mget
--------------------------------------------------
// CONSOLE

-[float]
-[[mget-type]]
-=== Optional Type
-
-The mget API allows for `_type` to be optional. Set it to `_all` or leave it empty in order
-to fetch the first document matching the id across all types.
-
-If you don't set the type and have many documents sharing the same `_id`, you will end up
-getting only the first matching document.
-
-For example, if you have a document 1 within typeA and typeB then following request
-will give you back only the same document twice:
-
-[source,js]
---------------------------------------------------
-GET /test/_mget
-{
-"ids" : ["1", "1"]
-}
---------------------------------------------------
-// CONSOLE
-
-You need in that case to explicitly set the `_type`:
-
-[source,js]
---------------------------------------------------
-GET /test/_mget/
-{
-"docs" : [
-{
-"_type":"typeA",
-"_id" : "1"
-},
-{
-"_type":"typeB",
-"_id" : "1"
-}
-]
-}
---------------------------------------------------
-// CONSOLE
-
[float]
[[mget-source-filtering]]
=== Source filtering
@@ -139,19 +97,19 @@ GET /_mget
"docs" : [
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "1",
"_source" : false
},
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "2",
"_source" : ["field3", "field4"]
},
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "3",
"_source" : {
"include": ["user"],
@@ -178,13 +136,13 @@ GET /_mget
"docs" : [
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "1",
"stored_fields" : ["field1", "field2"]
},
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "2",
"stored_fields" : ["field3", "field4"]
}
@@ -228,13 +186,13 @@ GET /_mget?routing=key1
"docs" : [
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "1",
"routing" : "key2"
},
{
"_index" : "test",
-"_type" : "type",
+"_type" : "_doc",
"_id" : "2"
}
]

@@ -17,13 +17,13 @@ POST /_mtermvectors
"docs": [
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_id": "2",
"term_statistics": true
},
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_id": "1",
"fields": [
"message"
@@ -46,7 +46,7 @@ POST /twitter/_mtermvectors
{
"docs": [
{
-"_type": "tweet",
+"_type": "_doc",
"_id": "2",
"fields": [
"message"
@@ -54,7 +54,7 @@ POST /twitter/_mtermvectors
"term_statistics": true
},
{
-"_type": "tweet",
+"_type": "_doc",
"_id": "1"
}
]
@@ -67,7 +67,7 @@ And type:

[source,js]
--------------------------------------------------
-POST /twitter/tweet/_mtermvectors
+POST /twitter/_doc/_mtermvectors
{
"docs": [
{
@@ -90,7 +90,7 @@ If all requested documents are on same index and have same type and also the par

[source,js]
--------------------------------------------------
-POST /twitter/tweet/_mtermvectors
+POST /twitter/_doc/_mtermvectors
{
"ids" : ["1", "2"],
"parameters": {
@@ -115,7 +115,7 @@ POST /_mtermvectors
"docs": [
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"doc" : {
"user" : "John Doe",
"message" : "twitter test test test"
@@ -123,7 +123,7 @@ POST /_mtermvectors
},
{
"_index": "twitter",
-"_type": "test",
+"_type": "_doc",
"doc" : {
"user" : "Jane Doe",
"message" : "Another twitter test ..."

@@ -84,9 +84,9 @@ These will create a document and immediately refresh the index so it is visible:

[source,js]
--------------------------------------------------
-PUT /test/test/1?refresh
+PUT /test/_doc/1?refresh
{"test": "test"}
-PUT /test/test/2?refresh=true
+PUT /test/_doc/2?refresh=true
{"test": "test"}
--------------------------------------------------
// CONSOLE
@@ -96,9 +96,9 @@ search:

[source,js]
--------------------------------------------------
-PUT /test/test/3
+PUT /test/_doc/3
{"test": "test"}
-PUT /test/test/4?refresh=false
+PUT /test/_doc/4?refresh=false
{"test": "test"}
--------------------------------------------------
// CONSOLE
@@ -107,7 +107,7 @@ This will create a document and wait for it to become visible for search:

[source,js]
--------------------------------------------------
-PUT /test/test/4?refresh=wait_for
+PUT /test/_doc/4?refresh=wait_for
{"test": "test"}
--------------------------------------------------
// CONSOLE

@@ -144,7 +144,7 @@ POST _reindex
{
"source": {
"index": "twitter",
-"type": "tweet",
+"type": "_doc",
"query": {
"term": {
"user": "kimchy"
@@ -173,7 +173,7 @@ POST _reindex
{
"source": {
"index": ["twitter", "blog"],
-"type": ["tweet", "post"]
+"type": ["_doc", "post"]
},
"dest": {
"index": "all_together"
@@ -236,7 +236,7 @@ POST _reindex
{
"source": {
"index": "twitter",
-"_source": ["user", "tweet"]
+"_source": ["user", "_doc"]
},
"dest": {
"index": "new_twitter"
@@ -796,7 +796,7 @@ create an index containing documents that look like this:

[source,js]
--------------------------------------------------
-POST test/test/1?refresh
+POST test/_doc/1?refresh
{
"text": "words words",
"flag": "foo"
@@ -829,7 +829,7 @@ Now you can get the new document:

[source,js]
--------------------------------------------------
-GET test2/test/1
+GET test2/_doc/1
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -842,7 +842,7 @@ and it'll look like:
"found": true,
"_id": "1",
"_index": "test2",
-"_type": "test",
+"_type": "_doc",
"_version": 1,
"_source": {
"text": "words words",
@@ -1023,9 +1023,9 @@ Assuming you have indices consisting of documents as following:

[source,js]
----------------------------------------------------------------
-PUT metricbeat-2016.05.30/beat/1?refresh
+PUT metricbeat-2016.05.30/_doc/1?refresh
{"system.cpu.idle.pct": 0.908}
-PUT metricbeat-2016.05.31/beat/1?refresh
+PUT metricbeat-2016.05.31/_doc/1?refresh
{"system.cpu.idle.pct": 0.105}
----------------------------------------------------------------
// CONSOLE
@@ -1061,8 +1061,8 @@ All documents from the previous metricbeat indices now can be found in the `*-1`

[source,js]
----------------------------------------------------------------
-GET metricbeat-2016.05.30-1/beat/1
-GET metricbeat-2016.05.31-1/beat/1
+GET metricbeat-2016.05.30-1/_doc/1
+GET metricbeat-2016.05.31-1/_doc/1
----------------------------------------------------------------
// CONSOLE
// TEST[continued]

@@ -8,7 +8,7 @@ realtime. This can be changed by setting `realtime` parameter to `false`.

[source,js]
--------------------------------------------------
-GET /twitter/tweet/1/_termvectors
+GET /twitter/_doc/1/_termvectors
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -18,7 +18,7 @@ retrieved either with a parameter in the url

[source,js]
--------------------------------------------------
-GET /twitter/tweet/1/_termvectors?fields=message
+GET /twitter/_doc/1/_termvectors?fields=message
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -130,7 +130,7 @@ First, we create an index that stores term vectors, payloads etc. :
--------------------------------------------------
PUT /twitter/
{ "mappings": {
-"tweet": {
+"_doc": {
"properties": {
"text": {
"type": "text",
@@ -172,13 +172,13 @@ Second, we add some documents:

[source,js]
--------------------------------------------------
-PUT /twitter/tweet/1
+PUT /twitter/_doc/1
{
"fullname" : "John Doe",
"text" : "twitter test test test "
}

-PUT /twitter/tweet/2
+PUT /twitter/_doc/2
{
"fullname" : "Jane Doe",
"text" : "Another twitter test ..."
@@ -192,7 +192,7 @@ The following request returns all information and statistics for field

[source,js]
--------------------------------------------------
-GET /twitter/tweet/1/_termvectors
+GET /twitter/_doc/1/_termvectors
{
"fields" : ["text"],
"offsets" : true,
@@ -212,7 +212,7 @@ Response:
{
"_id": "1",
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_version": 1,
"found": true,
"took": 6,
@@ -280,7 +280,7 @@ Note that for the field `text`, the terms are not re-generated.

[source,js]
--------------------------------------------------
-GET /twitter/tweet/1/_termvectors
+GET /twitter/_doc/1/_termvectors
{
"fields" : ["text", "some_field_without_term_vectors"],
"offsets" : true,
@@ -306,7 +306,7 @@ mapping will be dynamically created.*

[source,js]
--------------------------------------------------
-GET /twitter/tweet/_termvectors
+GET /twitter/_doc/_termvectors
{
"doc" : {
"fullname" : "John Doe",
@@ -329,7 +329,7 @@ vectors, the term vectors will be re-generated.

[source,js]
--------------------------------------------------
-GET /twitter/tweet/_termvectors
+GET /twitter/_doc/_termvectors
{
"doc" : {
"fullname" : "John Doe",
@@ -350,7 +350,7 @@ Response:
--------------------------------------------------
{
"_index": "twitter",
-"_type": "tweet",
+"_type": "_doc",
"_version": 0,
"found": true,
"took": 6,
@@ -396,7 +396,7 @@ their tf-idf must be too low.

[source,js]
--------------------------------------------------
-GET /imdb/movies/_termvectors
+GET /imdb/_doc/_termvectors
{
"doc": {
"plot": "When wealthy industrialist Tony Stark is forced to build an armored suit after a life-threatening incident, he ultimately decides to use its technology to fight against evil."
@@ -421,7 +421,7 @@ Response:
--------------------------------------------------
{
"_index": "imdb",
-"_type": "movies",
+"_type": "_doc",
"_version": 0,
"found": true,
"term_vectors": {

@@ -68,7 +68,7 @@ will only update `tweet` documents from the `twitter` index:

[source,js]
--------------------------------------------------
-POST twitter/tweet/_update_by_query?conflicts=proceed
+POST twitter/_doc/_update_by_query?conflicts=proceed
--------------------------------------------------
// CONSOLE
// TEST[setup:twitter]
@@ -626,7 +626,7 @@ added a mapping value to pick up more fields from the data:
PUT test
{
"mappings": {
-"test": {
+"_doc": {
"dynamic": false, <1>
"properties": {
"text": {"type": "text"}
@@ -635,17 +635,17 @@ PUT test
}
}

-POST test/test?refresh
+POST test/_doc?refresh
{
"text": "words words",
"flag": "bar"
}
-POST test/test?refresh
+POST test/_doc?refresh
{
"text": "words words",
"flag": "foo"
}
-PUT test/_mapping/test <2>
+PUT test/_mapping/_doc <2>
{
"properties": {
"text": {"type": "text"},

@@ -17,7 +17,7 @@ For example, let's index a simple doc:

[source,js]
--------------------------------------------------
-PUT test/type1/1
+PUT test/_doc/1
{
"counter" : 1,
"tags" : ["red"]
@@ -32,7 +32,7 @@ Now, we can execute a script that would increment the counter:

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"script" : {
"source": "ctx._source.counter += params.count",
@@ -51,7 +51,7 @@ will still add it, since its a list):

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"script" : {
"source": "ctx._source.tags.add(params.tag)",
@@ -73,7 +73,7 @@ We can also add a new field to the document:

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"script" : "ctx._source.new_field = 'value_of_new_field'"
}
@@ -85,7 +85,7 @@ Or remove a field from the document:

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"script" : "ctx._source.remove('new_field')"
}
@@ -99,7 +99,7 @@ the doc if the `tags` field contain `green`, otherwise it does nothing

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"script" : {
"source": "if (ctx._source.tags.contains(params.tag)) { ctx.op = 'delete' } else { ctx.op = 'none' }",
@@ -123,7 +123,7 @@ example:

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"doc" : {
"name" : "new_name"
@@ -144,7 +144,7 @@ By default updates that don't change anything detect that they don't change anyt

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"doc" : {
"name" : "new_name"
@@ -167,7 +167,7 @@ the request was ignored.
"failed": 0
},
"_index": "test",
-"_type": "type1",
+"_type": "_doc",
"_id": "1",
"_version": 6,
"result": "noop"
@@ -179,7 +179,7 @@ You can disable this behavior by setting "detect_noop": false like this:

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"doc" : {
"name" : "new_name"
@@ -200,7 +200,7 @@ will be inserted as a new document. If the document does exist, then the

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"script" : {
"source": "ctx._source.counter += params.count",
@@ -255,7 +255,7 @@ value:

[source,js]
--------------------------------------------------
-POST test/type1/1/_update
+POST test/_doc/1/_update
{
"doc" : {
"name" : "new_name"

@@ -380,7 +380,7 @@ Let's now put something into our customer index. We'll index a simple customer d

[source,js]
--------------------------------------------------
-PUT /customer/doc/1?pretty
+PUT /customer/_doc/1?pretty
{
"name": "John Doe"
}
@@ -393,7 +393,7 @@ And the response:
--------------------------------------------------
{
"_index" : "customer",
-"_type" : "doc",
+"_type" : "_doc",
"_id" : "1",
"_version" : 1,
"result" : "created",
@@ -416,7 +416,7 @@ Let's now retrieve that document that we just indexed:

[source,js]
--------------------------------------------------
-GET /customer/doc/1?pretty
+GET /customer/_doc/1?pretty
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -427,7 +427,7 @@ And the response:
--------------------------------------------------
{
"_index" : "customer",
-"_type" : "doc",
+"_type" : "_doc",
"_id" : "1",
"_version" : 1,
"found" : true,
@@ -465,11 +465,11 @@ Before we move on, let's take a closer look again at some of the API commands th
[source,js]
--------------------------------------------------
PUT /customer
-PUT /customer/doc/1
+PUT /customer/_doc/1
{
"name": "John Doe"
}
-GET /customer/doc/1
+GET /customer/_doc/1
DELETE /customer
--------------------------------------------------
// CONSOLE
@@ -495,7 +495,7 @@ We've previously seen how we can index a single document. Let's recall that comm

[source,js]
--------------------------------------------------
-PUT /customer/doc/1?pretty
+PUT /customer/_doc/1?pretty
{
"name": "John Doe"
}
@@ -506,7 +506,7 @@ Again, the above will index the specified document into the customer index, with

[source,js]
--------------------------------------------------
-PUT /customer/doc/1?pretty
+PUT /customer/_doc/1?pretty
{
"name": "Jane Doe"
}
@@ -518,7 +518,7 @@ The above changes the name of the document with the ID of 1 from "John Doe" to "

[source,js]
--------------------------------------------------
-PUT /customer/doc/2?pretty
+PUT /customer/_doc/2?pretty
{
"name": "Jane Doe"
}
@@ -534,7 +534,7 @@ This example shows how to index a document without an explicit ID:

[source,js]
--------------------------------------------------
-POST /customer/doc?pretty
+POST /customer/_doc?pretty
{
"name": "Jane Doe"
}
@@ -552,7 +552,7 @@ This example shows how to update our previous document (ID of 1) by changing the

[source,js]
--------------------------------------------------
-POST /customer/doc/1/_update?pretty
+POST /customer/_doc/1/_update?pretty
{
"doc": { "name": "Jane Doe" }
}
@@ -564,7 +564,7 @@ This example shows how to update our previous document (ID of 1) by changing the

[source,js]
--------------------------------------------------
-POST /customer/doc/1/_update?pretty
+POST /customer/_doc/1/_update?pretty
{
"doc": { "name": "Jane Doe", "age": 20 }
}
@@ -576,7 +576,7 @@ Updates can also be performed by using simple scripts. This example uses a scrip

[source,js]
--------------------------------------------------
-POST /customer/doc/1/_update?pretty
+POST /customer/_doc/1/_update?pretty
{
"script" : "ctx._source.age += 5"
}
@@ -594,7 +594,7 @@ Deleting a document is fairly straightforward. This example shows how to delete

[source,js]
--------------------------------------------------
-DELETE /customer/doc/2?pretty
+DELETE /customer/_doc/2?pretty
--------------------------------------------------
// CONSOLE
// TEST[continued]
@@ -611,7 +611,7 @@ As a quick example, the following call indexes two documents (ID 1 - John Doe an

[source,js]
--------------------------------------------------
-POST /customer/doc/_bulk?pretty
+POST /customer/_doc/_bulk?pretty
{"index":{"_id":"1"}}
{"name": "John Doe" }
{"index":{"_id":"2"}}
@@ -623,7 +623,7 @@ This example updates the first document (ID of 1) and then deletes the second do

[source,sh]
--------------------------------------------------
-POST /customer/doc/_bulk?pretty
+POST /customer/_doc/_bulk?pretty
{"update":{"_id":"1"}}
{"doc": { "name": "John Doe becomes Jane Doe" } }
{"delete":{"_id":"2"}}
@@ -696,7 +696,7 @@ yellow open bank l7sSYV2cQXmu6_4rJWVIww 5 1 1000 0 12
// TESTRESPONSE[s/128.6kb/\\d+(\\.\\d+)?[mk]?b/]
// TESTRESPONSE[s/l7sSYV2cQXmu6_4rJWVIww/.+/ _cat]

-Which means that we just successfully bulk indexed 1000 documents into the bank index (under the account type).
+Which means that we just successfully bulk indexed 1000 documents into the bank index (under the `_doc` type).

=== The Search API

@@ -731,14 +731,14 @@ And the response (partially shown):
"max_score" : null,
"hits" : [ {
"_index" : "bank",
-"_type" : "account",
+"_type" : "_doc",
"_id" : "0",
"sort": [0],
"_score" : null,
"_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"}
}, {
"_index" : "bank",
-"_type" : "account",
+"_type" : "_doc",
"_id" : "1",
"sort": [1],
"_score" : null,
@@ -799,14 +799,14 @@ to clutter the docs with it:
"max_score": null,
"hits" : [ {
"_index" : "bank",
-"_type" : "account",
+"_type" : "_doc",
"_id" : "0",
"sort": [0],
"_score": null,
"_source" : {"account_number":0,"balance":16623,"firstname":"Bradshaw","lastname":"Mckenzie","age":29,"gender":"F","address":"244 Columbus Place","employer":"Euron","email":"bradshawmckenzie@euron.com","city":"Hobucken","state":"CO"}
}, {
"_index" : "bank",
-"_type" : "account",
+"_type" : "_doc",
"_id" : "1",
"sort": [1],
"_score": null,

@@ -24,7 +24,7 @@ PUT index
}
},
"mappings": {
-"type": {
+"_doc": {
"properties": {
"body": {
"type": "text",
@@ -41,12 +41,12 @@ PUT index
}
}

-PUT index/type/1
+PUT index/_doc/1
{
"body": "Ski resort"
}

-PUT index/type/2
+PUT index/_doc/2
{
"body": "A pair of skis"
}
@@ -89,7 +89,7 @@ GET index/_search
"hits": [
{
"_index": "index",
-"_type": "type",
+"_type": "_doc",
"_id": "2",
"_score": 0.2876821,
"_source": {
@@ -98,7 +98,7 @@ GET index/_search
},
{
"_index": "index",
-"_type": "type",
+"_type": "_doc",
"_id": "1",
"_score": 0.2876821,
"_source": {
@@ -147,7 +147,7 @@ GET index/_search
"hits": [
{
"_index": "index",
-"_type": "type",
+"_type": "_doc",
"_id": "1",
"_score": 0.2876821,
"_source": {
@@ -204,7 +204,7 @@ GET index/_search
"hits": [
{
"_index": "index",
-"_type": "type",
+"_type": "_doc",
"_id": "1",
"_score": 0.2876821,
"_source": {

@@ -49,7 +49,7 @@ For instance, if documents look like:

[source,js]
--------------------------------------------------
-PUT index/type/1
+PUT index/_doc/1
{
"designation": "spoon",
"price": 13
@@ -88,7 +88,7 @@ should be mapped as a <<keyword,`keyword`>>:
PUT index
{
"mappings": {
-"type": {
+"_doc": {
"properties": {
"price_range": {
"type": "keyword"
@@ -98,7 +98,7 @@ PUT index
}
}

-PUT index/type/1
+PUT index/_doc/1
{
"designation": "spoon",
"price": 13,
@@ -152,7 +152,7 @@ For instance the below query:

[source,js]
--------------------------------------------------
-PUT index/type/1
+PUT index/_doc/1
{
"my_date": "2016-05-11T16:30:55.328Z"
}
@@ -284,7 +284,7 @@ eagerly at refresh-time by configuring mappings as described below:
PUT index
{
"mappings": {
-"type": {
+"_doc": {
"properties": {
"foo": {
"type": "keyword",

@ -25,7 +25,7 @@ PUT twitter
|
|||
}
|
||||
},
|
||||
"mappings": {
|
||||
"tweet": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"date": {
|
||||
"type": "date"
|
||||
|
@ -53,7 +53,7 @@ PUT twitter
|
|||
}
|
||||
},
|
||||
"mappings": {
|
||||
"tweet": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"username": {
|
||||
"type": "keyword",
|
||||
|
|
|
@ -44,7 +44,7 @@ Here we configure the DFRSimilarity so it can be referenced as
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index/_mapping/book
|
||||
PUT /index/_mapping/_doc
|
||||
{
|
||||
"properties" : {
|
||||
"title" : { "type" : "text", "similarity" : "my_similarity" }
|
||||
|
@ -197,7 +197,7 @@ PUT /index
|
|||
}
|
||||
},
|
||||
"mappings": {
|
||||
"doc": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"field": {
|
||||
"type": "text",
|
||||
|
@ -208,12 +208,12 @@ PUT /index
|
|||
}
|
||||
}
|
||||
|
||||
PUT /index/doc/1
|
||||
PUT /index/_doc/1
|
||||
{
|
||||
"field": "foo bar foo"
|
||||
}
|
||||
|
||||
PUT /index/doc/2
|
||||
PUT /index/_doc/2
|
||||
{
|
||||
"field": "bar baz"
|
||||
}
|
||||
|
@ -253,7 +253,7 @@ Which yields:
|
|||
"_shard": "[index][0]",
|
||||
"_node": "OzrdjxNtQGaqs4DmioFw9A",
|
||||
"_index": "index",
|
||||
"_type": "doc",
|
||||
"_type": "_doc",
|
||||
"_id": "1",
|
||||
"_score": 1.9508477,
|
||||
"_source": {
|
||||
|
@ -355,7 +355,7 @@ PUT /index
|
|||
}
|
||||
},
|
||||
"mappings": {
|
||||
"doc": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"field": {
|
||||
"type": "text",
|
||||
|
@ -372,12 +372,12 @@ PUT /index
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
PUT /index/doc/1
|
||||
PUT /index/_doc/1
|
||||
{
|
||||
"field": "foo bar foo"
|
||||
}
|
||||
|
||||
PUT /index/doc/2
|
||||
PUT /index/_doc/2
|
||||
{
|
||||
"field": "bar baz"
|
||||
}
|
||||
|
@ -416,7 +416,7 @@ GET /index/_search?explain=true
|
|||
"_shard": "[index][0]",
|
||||
"_node": "OzrdjxNtQGaqs4DmioFw9A",
|
||||
"_index": "index",
|
||||
"_type": "doc",
|
||||
"_type": "_doc",
|
||||
"_id": "1",
|
||||
"_score": 1.9508477,
|
||||
"_source": {
|
||||
|
|
|
@ -12,7 +12,7 @@ For example, consider the following mapping:
|
|||
PUT publications
|
||||
{
|
||||
"mappings": {
|
||||
"article": {
|
||||
"_doc": {
|
||||
"properties": {
|
||||
"id": { "type": "text" },
|
||||
"title": { "type": "text"},
|
||||
|
@ -35,7 +35,7 @@ The following returns the mapping of the field `title` only:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET publications/_mapping/article/field/title
|
||||
GET publications/_mapping/_doc/field/title
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
|
@ -46,7 +46,7 @@ For which the response is:
|
|||
{
|
||||
"publications": {
|
||||
"mappings": {
|
||||
"article": {
|
||||
"_doc": {
|
||||
"title": {
|
||||
"full_name": "title",
|
||||
"mapping": {
|
||||
|
@ -76,9 +76,9 @@ following are some examples:
|
|||
--------------------------------------------------
|
||||
GET /twitter,kimchy/_mapping/field/message
|
||||
|
||||
GET /_all/_mapping/tweet,book/field/message,user.id
|
||||
GET /_all/_mapping/_doc,tweet,book/field/message,user.id
|
||||
|
||||
GET /_all/_mapping/tw*/field/*.id
|
||||
GET /_all/_mapping/_do*/field/*.id
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[setup:twitter]
|
||||
|
@ -93,7 +93,7 @@ For instance to select the `id` of the `author` field, you must use its full nam
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET publications/_mapping/article/field/author.id,abstract,name
|
||||
GET publications/_mapping/_doc/field/author.id,abstract,name
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
|
@ -104,7 +104,7 @@ returns:
|
|||
{
|
||||
"publications": {
|
||||
"mappings": {
|
||||
"article": {
|
||||
"_doc": {
|
||||
"author.id": {
|
||||
"full_name": "author.id",
|
||||
"mapping": {
|
||||
|
@ -132,7 +132,7 @@ The get field mapping API also supports wildcard notation.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET publications/_mapping/article/field/a*
|
||||
GET publications/_mapping/_doc/field/a*
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
|
||||
|
@ -143,7 +143,7 @@ returns:
|
|||
{
|
||||
"publications": {
|
||||
"mappings": {
|
||||
"article": {
|
||||
"_doc": {
|
||||
"author.name": {
|
||||
"full_name": "author.name",
|
||||
"mapping": {
|
||||
|
|
|
@ -6,7 +6,7 @@ index/type.
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /twitter/_mapping/tweet
|
||||
GET /twitter/_mapping/_doc
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[setup:twitter]
|
||||
|
@ -23,9 +23,9 @@ following are some examples:
|
|||
|
||||
[source,js]
|
||||
--------------------------------------------------
|
||||
GET /_mapping/tweet
|
||||
GET /_mapping/_doc
|
||||
|
||||
GET /_all/_mapping/tweet
|
||||
GET /_all/_mapping/_doc
|
||||
--------------------------------------------------
|
||||
// CONSOLE
|
||||
// TEST[setup:twitter]
|
||||
|
|
|
@@ -9,7 +9,7 @@ fields to an existing type:
 PUT twitter <1>
 {}
 
-PUT twitter/_mapping/user <2>
+PUT twitter/_mapping/_doc <2>
 {
   "properties": {
     "name": {

@@ -18,7 +18,7 @@ PUT twitter/_mapping/user <2>
     }
   }
 
-PUT twitter/_mapping/user <3>
+PUT twitter/_mapping/_doc <3>
 {
   "properties": {
     "email": {

@@ -48,7 +48,7 @@ PUT twitter-1
 PUT twitter-2
 
 # Update both mappings
-PUT /twitter-1,twitter-2/_mapping/my_type <1>
+PUT /twitter-1,twitter-2/_mapping/_doc <1>
 {
   "properties": {
     "user_name": {

@@ -84,7 +84,7 @@ For example:
 PUT my_index <1>
 {
   "mappings": {
-    "user": {
+    "_doc": {
       "properties": {
         "name": {
           "properties": {

@@ -101,7 +101,7 @@ PUT my_index <1>
     }
   }
 
-PUT my_index/_mapping/user
+PUT my_index/_mapping/_doc
 {
   "properties": {
     "name": {
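After updating a mapping this way, the merged result can be checked back; a quick sketch (reusing the `twitter` index above):

[source,js]
--------------------------------------------------
GET twitter/_mapping/_doc
--------------------------------------------------
// CONSOLE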
@@ -106,7 +106,7 @@ PUT /%3Clogs-%7Bnow%2Fd%7D-1%3E <1>
   }
 }
 
-PUT logs_write/log/1
+PUT logs_write/_doc/1
 {
   "message": "a dummy log"
 }

@@ -26,7 +26,7 @@ tell the ingest node which pipeline to use. For example:
 
 [source,js]
 --------------------------------------------------
-PUT my-index/my-type/my-id?pipeline=my_pipeline_id
+PUT my-index/_doc/my-id?pipeline=my_pipeline_id
 {
   "foo": "bar"
 }
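For context, `my_pipeline_id` would be registered beforehand through the ingest API; a minimal sketch (the `set` processor and its field/value here are illustrative, not part of this commit):

[source,js]
--------------------------------------------------
PUT _ingest/pipeline/my_pipeline_id
{
  "description": "describe pipeline",
  "processors": [
    {
      "set": {
        "field": "foo",
        "value": "bar"
      }
    }
  ]
}
--------------------------------------------------
// CONSOLE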
@@ -250,7 +250,7 @@ POST _ingest/pipeline/_simulate
   "docs": [
     {
       "_index": "index",
-      "_type": "type",
+      "_type": "_doc",
       "_id": "id",
       "_source": {
         "foo": "bar"

@@ -258,7 +258,7 @@ POST _ingest/pipeline/_simulate
     },
     {
       "_index": "index",
-      "_type": "type",
+      "_type": "_doc",
       "_id": "id",
       "_source": {
         "foo": "rab"

@@ -279,7 +279,7 @@ Response:
       "doc": {
         "_id": "id",
         "_index": "index",
-        "_type": "type",
+        "_type": "_doc",
         "_source": {
           "field2": "_value",
           "foo": "bar"

@@ -293,7 +293,7 @@ Response:
       "doc": {
         "_id": "id",
         "_index": "index",
-        "_type": "type",
+        "_type": "_doc",
         "_source": {
           "field2": "_value",
           "foo": "rab"

@@ -343,7 +343,7 @@ POST _ingest/pipeline/_simulate?verbose
   "docs": [
     {
       "_index": "index",
-      "_type": "type",
+      "_type": "_doc",
       "_id": "id",
       "_source": {
         "foo": "bar"

@@ -351,7 +351,7 @@ POST _ingest/pipeline/_simulate?verbose
     },
     {
       "_index": "index",
-      "_type": "type",
+      "_type": "_doc",
       "_id": "id",
       "_source": {
         "foo": "rab"

@@ -374,7 +374,7 @@ Response:
         "doc": {
           "_id": "id",
           "_index": "index",
-          "_type": "type",
+          "_type": "_doc",
           "_source": {
             "field2": "_value2",
             "foo": "bar"

@@ -388,7 +388,7 @@ Response:
         "doc": {
           "_id": "id",
           "_index": "index",
-          "_type": "type",
+          "_type": "_doc",
           "_source": {
             "field3": "_value3",
             "field2": "_value2",

@@ -407,7 +407,7 @@ Response:
         "doc": {
           "_id": "id",
           "_index": "index",
-          "_type": "type",
+          "_type": "_doc",
           "_source": {
             "field2": "_value2",
             "foo": "rab"

@@ -421,7 +421,7 @@ Response:
         "doc": {
           "_id": "id",
           "_index": "index",
-          "_type": "type",
+          "_type": "_doc",
           "_source": {
             "field3": "_value3",
             "field2": "_value2",

@@ -917,7 +917,7 @@ Using that pipeline for an index request:
 
 [source,js]
 --------------------------------------------------
-PUT /myindex/type/1?pipeline=monthlyindex
+PUT /myindex/_doc/1?pipeline=monthlyindex
 {
   "date1" : "2016-04-25T12:02:01.789Z"
 }

@@ -929,7 +929,7 @@ PUT /myindex/type/1?pipeline=monthlyindex
 --------------------------------------------------
 {
   "_index" : "myindex-2016-04-01",
-  "_type" : "type",
+  "_type" : "_doc",
   "_id" : "1",
   "_version" : 1,
   "result" : "created",

@@ -1824,11 +1824,11 @@ was provided in the original index request:
 --------------------------------------------------
 PUT _ingest/pipeline/my_index
 {
-  "description": "use index:my_index and type:my_type",
+  "description": "use index:my_index and type:_doc",
   "processors": [
     {
       "script": {
-        "source": " ctx._index = 'my_index'; ctx._type = 'my_type' "
+        "source": " ctx._index = 'my_index'; ctx._type = '_doc' "
       }
     }
   ]

@@ -1840,7 +1840,7 @@ Using the above pipeline, we can attempt to index a document into the `any_index
 
 [source,js]
 --------------------------------------------------
-PUT any_index/any_type/1?pipeline=my_index
+PUT any_index/_doc/1?pipeline=my_index
 {
   "message": "text"
 }

@@ -1854,7 +1854,7 @@ The response from the above index request:
 --------------------------------------------------
 {
   "_index": "my_index",
-  "_type": "my_type",
+  "_type": "_doc",
   "_id": "1",
   "_version": 1,
   "result": "created",
@@ -9,11 +9,11 @@ type, and fields will spring to life automatically:
 
 [source,js]
 --------------------------------------------------
-PUT data/counters/1 <1>
+PUT data/_doc/1 <1>
 { "count": 5 }
 --------------------------------------------------
 // CONSOLE
-<1> Creates the `data` index, the `counters` mapping type, and a field
+<1> Creates the `data` index, the `_doc` mapping type, and a field
     called `count` with datatype `long`.
 
 The automatic detection and addition of new fields is called
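To see what sprang to life, the generated mapping can be inspected; a sketch:

[source,js]
--------------------------------------------------
GET data/_mapping
--------------------------------------------------
// CONSOLE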
@@ -48,7 +48,7 @@ For example:
 
 [source,js]
 --------------------------------------------------
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "create_date": "2015/09/02"
 }

@@ -69,13 +69,13 @@ Dynamic date detection can be disabled by setting `date_detection` to `false`:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "date_detection": false
     }
   }
 }
 
-PUT my_index/my_type/1 <1>
+PUT my_index/_doc/1 <1>
 {
   "create": "2015/09/02"
 }

@@ -94,13 +94,13 @@ own <<mapping-date-format,date formats>>:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_date_formats": ["MM/dd/yyyy"]
     }
   }
 }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "create_date": "09/25/2015"
 }

@@ -122,13 +122,13 @@ correct solution is to map these fields explicitly, but numeric detection
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
      "numeric_detection": true
     }
   }
 }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "my_float": "1.0", <1>
   "my_integer": "1" <2>

@@ -61,7 +61,7 @@ could use the following template:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "integers": {

@@ -90,7 +90,7 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "my_integer": 5, <1>
   "my_string": "Some string" <2>

@@ -117,7 +117,7 @@ fields:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "longs_as_strings": {

@@ -134,7 +134,7 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "long_num": "5", <1>
   "long_text": "foo" <2>

@@ -173,7 +173,7 @@ top-level `full_name` field, except for the `middle` field:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "full_name": {

@@ -190,7 +190,7 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "name": {
     "first": "Alice",

@@ -214,7 +214,7 @@ field, and disables <<doc-values,`doc_values`>> for all non-string fields:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "named_analyzers": {

@@ -240,7 +240,7 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "english": "Some English text", <1>
   "count": 5 <2>

@@ -268,7 +268,7 @@ you will have to search on the exact same value that was indexed.
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "strings_as_keywords": {

@@ -298,7 +298,7 @@ before 5.0):
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "strings_as_text": {

@@ -326,7 +326,7 @@ disable the storage of these scoring factors in the index and save some space.
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "strings_as_keywords": {

@@ -367,7 +367,7 @@ maybe gain some indexing speed:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic_templates": [
         {
           "unindexed_longs": {
@@ -11,12 +11,12 @@ The value of the `_field_names` field is accessible in queries:
 [source,js]
 --------------------------
 # Example documents
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "title": "This is a document"
 }
 
-PUT my_index/my_type/2?refresh=true
+PUT my_index/_doc/2?refresh=true
 {
   "title": "This is another document",
   "body": "This document has a body"

@@ -48,7 +48,7 @@ disable this field if you want to optimize for indexing speed and do not need
 PUT tweets
 {
   "mappings": {
-    "tweet": {
+    "_doc": {
       "_field_names": {
         "enabled": false
       }
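The `_field_names` field is what powers `exists`-style queries; a sketch against the example documents above (only the second document has a `body`):

[source,js]
--------------------------
GET my_index/_search
{
  "query": {
    "exists": {
      "field": "body"
    }
  }
}
--------------------------
// CONSOLE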
@@ -15,12 +15,12 @@ The value of the `_id` field is accessible in certain queries (`term`,
 [source,js]
 --------------------------
 # Example documents
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "text": "Document with ID 1"
 }
 
-PUT my_index/my_type/2&refresh=true
+PUT my_index/_doc/2&refresh=true
 {
   "text": "Document with ID 2"
 }

@@ -16,12 +16,12 @@ but it does not support `prefix`, `wildcard`, `regexp`, or `fuzzy` queries.
 [source,js]
 --------------------------
 # Example documents
-PUT index_1/my_type/1
+PUT index_1/_doc/1
 {
   "text": "Document in index 1"
 }
 
-PUT index_2/my_type/2?refresh=true
+PUT index_2/_doc/2?refresh=true
 {
   "text": "Document in index 2"
 }
@@ -13,12 +13,12 @@ value per document. For instance:
 
 [source,js]
 ------------------------------
-PUT my_index/my_type/1?routing=user1&refresh=true <1>
+PUT my_index/_doc/1?routing=user1&refresh=true <1>
 {
   "title": "This is a document"
 }
 
-GET my_index/my_type/1?routing=user1 <2>
+GET my_index/_doc/1?routing=user1 <2>
 ------------------------------
 // CONSOLE
 // TESTSETUP

@@ -82,7 +82,7 @@ custom `routing` value required for all CRUD operations:
 PUT my_index2
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "_routing": {
         "required": true <1>
       }

@@ -90,14 +90,14 @@ PUT my_index2
     }
   }
 
-PUT my_index2/my_type/1 <2>
+PUT my_index2/_doc/1 <2>
 {
   "text": "No routing value provided"
 }
 ------------------------------
 // CONSOLE
 // TEST[catch:bad_request]
-<1> Routing is required for `my_type` documents.
+<1> Routing is required for `_doc` documents.
 <2> This index request throws a `routing_missing_exception`.
 
 ==== Unique IDs with custom routing
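Because the routing value determines the shard, a search can be limited to the matching shard by passing the same value; a sketch against `my_index` above:

[source,js]
------------------------------
GET my_index/_search?routing=user1
{
  "query": {
    "match": {
      "title": "document"
    }
  }
}
------------------------------
// CONSOLE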
@@ -16,7 +16,7 @@ within the index. For this reason, it can be disabled as follows:
 PUT tweets
 {
   "mappings": {
-    "tweet": {
+    "_doc": {
       "_source": {
         "enabled": false
       }

@@ -88,7 +88,7 @@ as follows:
 PUT logs
 {
   "mappings": {
-    "event": {
+    "_doc": {
       "_source": {
         "includes": [
           "*.count",

@@ -103,7 +103,7 @@ PUT logs
     }
   }
 
-PUT logs/event/1
+PUT logs/_doc/1
 {
   "requests": {
     "count": 10,

@@ -119,7 +119,7 @@ PUT logs/event/1
     }
   }
 
-GET logs/event/_search
+GET logs/_search
 {
   "query": {
     "match": {
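Source filtering can also be requested per search instead of being fixed in the mapping; a sketch:

[source,js]
--------------------------------------------------
GET logs/_search
{
  "_source": ["requests.count"],
  "query": {
    "match_all": {}
  }
}
--------------------------------------------------
// CONSOLE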
@@ -14,7 +14,7 @@ scripts, and when sorting:
 --------------------------
 # Example documents
 
-PUT my_index/doc/1?refresh=true
+PUT my_index/_doc/1?refresh=true
 {
   "text": "Document with type 'doc'"
 }

@@ -23,7 +23,7 @@ GET my_index/_search
 {
   "query": {
     "term": {
-      "_type": "doc" <1>
+      "_type": "_doc" <1>
     }
   },
   "aggs": {

@@ -13,12 +13,12 @@ and when sorting:
 [source,js]
 --------------------------
 # Example documents
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "text": "Document with ID 1"
 }
 
-PUT my_index/my_type/2?refresh=true
+PUT my_index/_doc/2?refresh=true
 {
   "text": "Document with ID 2"
 }

@@ -31,7 +31,7 @@ GET my_index/_search
 {
   "query": {
     "terms": {
-      "_uid": [ "my_type#1", "my_type#2" ] <1>
+      "_uid": [ "_doc#1", "_doc#2" ] <1>
     }
   },
   "aggs": {

@@ -44,7 +44,7 @@ in the field mapping, as follows:
 PUT /my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "text": { <1>
           "type": "text",

@@ -123,7 +123,7 @@ PUT my_index
     }
   },
   "mappings":{
-    "my_type":{
+    "_doc":{
       "properties":{
         "title": {
           "type":"text",

@@ -136,17 +136,17 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "title":"The Quick Brown Fox"
 }
 
-PUT my_index/my_type/2
+PUT my_index/_doc/2
 {
   "title":"A Quick Brown Fox"
 }
 
-GET my_index/my_type/_search
+GET my_index/_search
 {
   "query":{
     "query_string":{

@@ -9,7 +9,7 @@ Individual fields can be _boosted_ automatically -- count more towards the relev
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "title": {
           "type": "text",

@@ -20,7 +20,7 @@ For instance:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "number_one": {
           "type": "integer"

@@ -34,12 +34,12 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "number_one": "10" <1>
 }
 
-PUT my_index/my_type/2
+PUT my_index/_doc/2
 {
   "number_two": "10" <2>
 }

@@ -67,7 +67,7 @@ PUT my_index
     "index.mapping.coerce": false
   },
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "number_one": {
           "type": "integer",

@@ -81,10 +81,10 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 { "number_one": "10" } <1>
 
-PUT my_index/my_type/2
+PUT my_index/_doc/2
 { "number_two": "10" } <2>
 --------------------------------------------------
 // CONSOLE
@@ -11,7 +11,7 @@ the `full_name` field as follows:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "first_name": {
           "type": "text",

@@ -29,7 +29,7 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "first_name": "John",
   "last_name": "Smith"
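Values copied into `full_name` can then be queried as one field; a sketch:

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "query": {
    "match": {
      "full_name": {
        "query": "John Smith",
        "operator": "and"
      }
    }
  }
}
--------------------------------------------------
// CONSOLE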
@@ -26,7 +26,7 @@ value from a script, you can disable doc values in order to save disk space:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "status_code": { <1>
           "type": "keyword"

@@ -7,7 +7,7 @@ containing the new field. For instance:
 
 [source,js]
 --------------------------------------------------
-PUT my_index/my_type/1 <1>
+PUT my_index/_doc/1 <1>
 {
   "username": "johnsmith",
   "name": {

@@ -18,7 +18,7 @@ PUT my_index/my_type/1 <1>
 
 GET my_index/_mapping <2>
 
-PUT my_index/my_type/2 <3>
+PUT my_index/_doc/2 <3>
 {
   "username": "marywhite",
   "email": "mary@white.com",

@@ -61,7 +61,7 @@ object or from the mapping type. For instance:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "dynamic": false, <1>
       "properties": {
         "user": { <2>

@@ -36,7 +36,7 @@ aggregations:
 
 [source,js]
 ------------
-PUT my_index/_mapping/my_type
+PUT my_index/_mapping/_doc
 {
   "properties": {
     "tags": {

@@ -59,7 +59,7 @@ time:
 
 [source,js]
 ------------
-PUT my_index/_mapping/my_type
+PUT my_index/_mapping/_doc
 {
   "properties": {
     "tags": {

@@ -18,7 +18,7 @@ in any other way:
 PUT my_index
 {
   "mappings": {
-    "session": {
+    "_doc": {
       "properties": {
         "user_id": {
           "type": "keyword"

@@ -34,7 +34,7 @@ PUT my_index
     }
   }
 
-PUT my_index/session/session_1
+PUT my_index/_doc/session_1
 {
   "user_id": "kimchy",
   "session_data": { <2>

@@ -45,7 +45,7 @@ PUT my_index/session/session_1
   "last_updated": "2015-12-06T18:20:22"
 }
 
-PUT my_index/session/session_2
+PUT my_index/_doc/session_2
 {
   "user_id": "jpountz",
   "session_data": "none", <3>

@@ -66,13 +66,13 @@ retrieved, but none of its contents are indexed in any way:
 PUT my_index
 {
   "mappings": {
-    "session": { <1>
+    "_doc": { <1>
       "enabled": false
     }
   }
 }
 
-PUT my_index/session/session_1
+PUT my_index/_doc/session_1
 {
   "user_id": "kimchy",
   "session_data": {

@@ -83,12 +83,12 @@ PUT my_index/session/session_1
   "last_updated": "2015-12-06T18:20:22"
 }
 
-GET my_index/session/session_1 <2>
+GET my_index/_doc/session_1 <2>
 
 GET my_index/_mapping <3>
 --------------------------------------------------
 // CONSOLE
-<1> The entire `session` mapping type is disabled.
+<1> The entire `_doc` mapping type is disabled.
 <2> The document can be retrieved.
 <3> Checking the mapping reveals that no fields have been added.
 
@@ -58,7 +58,7 @@ enabled for aggregations, as follows:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "my_field": { <1>
           "type": "text",

@@ -84,7 +84,7 @@ You can enable fielddata on an existing `text` field using the
 
 [source,js]
 -----------------------------------
-PUT my_index/_mapping/my_type
+PUT my_index/_mapping/_doc
 {
   "properties": {
     "my_field": { <1>

@@ -126,7 +126,7 @@ number of docs that the segment should contain with `min_segment_size`:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "tag": {
           "type": "text",
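Once fielddata is enabled, the `text` field becomes usable for sorting and aggregations; a sketch (the aggregation name is illustrative):

[source,js]
-----------------------------------
GET my_index/_search
{
  "aggs": {
    "my_field_values": {
      "terms": {
        "field": "my_field"
      }
    }
  }
}
-----------------------------------
// CONSOLE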
@@ -14,7 +14,7 @@ Besides the <<built-in-date-formats,built-in formats>>, your own
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "date": {
           "type": "date",
@@ -8,7 +8,7 @@ Strings longer than the `ignore_above` setting will not be indexed or stored.
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "message": {
           "type": "keyword",

@@ -19,12 +19,12 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1 <2>
+PUT my_index/_doc/1 <2>
 {
   "message": "Syntax error"
 }
 
-PUT my_index/my_type/2 <3>
+PUT my_index/_doc/2 <3>
 {
   "message": "Syntax error with some long stacktrace"
 }
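A terms aggregation makes the effect visible, since only the value short enough to be indexed comes back; a sketch:

[source,js]
--------------------------------------------------
GET my_index/_search
{
  "aggs": {
    "messages": {
      "terms": {
        "field": "message"
      }
    }
  }
}
--------------------------------------------------
// CONSOLE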
@@ -17,7 +17,7 @@ For example:
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "number_one": {
           "type": "integer",

@@ -31,13 +31,13 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "text": "Some text value",
   "number_one": "foo" <1>
 }
 
-PUT my_index/my_type/2
+PUT my_index/_doc/2
 {
   "text": "Some text value",
   "number_two": "foo" <2>

@@ -67,7 +67,7 @@ PUT my_index
     "index.mapping.ignore_malformed": true <1>
   },
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "number_one": { <1>
           "type": "byte"

@@ -38,7 +38,7 @@ all other fields use `docs` as the default.
 PUT my_index
 {
   "mappings": {
-    "my_type": {
+    "_doc": {
       "properties": {
         "text": {
           "type": "text",

@@ -49,7 +49,7 @@ PUT my_index
     }
   }
 
-PUT my_index/my_type/1
+PUT my_index/_doc/1
 {
   "text": "Quick brown fox"
 }
Some files were not shown because too many files have changed in this diff.