Merge pull request #14977 from jpountz/test/plugin_mapping_upgrade
Add a test that upgrades succeed even if a mapping contains fields that come from a plugin.
commit 60bb0d6907
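In outline, the change has two halves: a dev-tools script builds an index on an old 2.0.0 release with the plugin installed and zips it into the plugin's test resources, and new integration tests unzip that fixture into a fresh node and verify that the plugin-provided fields still work after the upgrade. A minimal sketch of the fixture-building half, composed from the helpers this commit adds (values are illustrative, not part of the commit):

import os
import tempfile
import create_bwc_index  # dev-tools module extended in this commit

version, plugin = '2.0.0', 'mapper-murmur3'   # illustrative values
release_dir = os.path.join('backwards', 'elasticsearch-%s' % version)
tmp_dir = tempfile.mkdtemp()
data_dir = os.path.join(tmp_dir, 'data')
repo_dir = os.path.join(tmp_dir, 'repo')

create_bwc_index.install_plugin(version, release_dir, plugin)
node = None
try:
  node = create_bwc_index.start_node(version, release_dir, data_dir, repo_dir,
                                     cluster_name='index-%s-%s' % (plugin, version))
  # ... index docs whose mapping uses the plugin's field type, then zip the
  # data directory into plugins/<plugin>/src/test/resources/indices/bwc/ ...
finally:
  if node is not None:
    create_bwc_index.shutdown_node(node)
  create_bwc_index.remove_plugin(version, release_dir, plugin)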
@@ -149,6 +149,16 @@ def start_node(version, release_dir, data_dir, repo_dir, tcp_port=DEFAULT_TRANSP
    cmd.append('-f') # version before 1.0 start in background automatically
  return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

def install_plugin(version, release_dir, plugin_name):
  run_plugin(version, release_dir, 'install', [plugin_name])

def remove_plugin(version, release_dir, plugin_name):
  run_plugin(version, release_dir, 'remove', [plugin_name])

def run_plugin(version, release_dir, plugin_cmd, args):
  cmd = [os.path.join(release_dir, 'bin/plugin'), plugin_cmd] + args
  subprocess.check_call(cmd)

def create_client(http_port=DEFAULT_HTTP_TCP_PORT, timeout=30):
  logging.info('Waiting for node to startup')
  for _ in range(0, timeout):
@@ -0,0 +1,124 @@
import create_bwc_index
import logging
import os
import random
import shutil
import subprocess
import sys
import tempfile

def fetch_version(version):
  logging.info('fetching ES version %s' % version)
  if subprocess.call([sys.executable, os.path.join(os.path.split(sys.argv[0])[0], 'get-bwc-version.py'), version]) != 0:
    raise RuntimeError('failed to download ES version %s' % version)

def create_index(plugin, mapping, docs):
  '''
  Creates a static back compat index (.zip) with mappings using fields defined in plugins.
  '''

  logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
                      datefmt='%Y-%m-%d %I:%M:%S %p')
  logging.getLogger('elasticsearch').setLevel(logging.ERROR)
  logging.getLogger('urllib3').setLevel(logging.WARN)

  tmp_dir = tempfile.mkdtemp()
  plugin_installed = False
  node = None
  try:
    data_dir = os.path.join(tmp_dir, 'data')
    repo_dir = os.path.join(tmp_dir, 'repo')
    logging.info('Temp data dir: %s' % data_dir)
    logging.info('Temp repo dir: %s' % repo_dir)

    version = '2.0.0'
    classifier = '%s-%s' % (plugin, version)
    index_name = 'index-%s' % classifier

    # Download old ES releases if necessary:
    release_dir = os.path.join('backwards', 'elasticsearch-%s' % version)
    if not os.path.exists(release_dir):
      fetch_version(version)

    create_bwc_index.install_plugin(version, release_dir, plugin)
    plugin_installed = True
    node = create_bwc_index.start_node(version, release_dir, data_dir, repo_dir, cluster_name=index_name)
    client = create_bwc_index.create_client()
    put_plugin_mappings(client, index_name, mapping, docs)
    create_bwc_index.shutdown_node(node)

    print('%s server output:\n%s' % (version, node.stdout.read().decode('utf-8')))
    node = None
    create_bwc_index.compress_index(classifier, tmp_dir, 'plugins/%s/src/test/resources/indices/bwc' % plugin)
  finally:
    if node is not None:
      create_bwc_index.shutdown_node(node)
    if plugin_installed:
      create_bwc_index.remove_plugin(version, release_dir, plugin)
    shutil.rmtree(tmp_dir)

def put_plugin_mappings(client, index_name, mapping, docs):
  client.indices.delete(index=index_name, ignore=404)
  logging.info('Create single shard test index')

  client.indices.create(index=index_name, body={
    'settings': {
      'number_of_shards': 1,
      'number_of_replicas': 0
    },
    'mappings': {
      'type': mapping
    }
  })
  health = client.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
  assert health['timed_out'] == False, 'cluster health timed out %s' % health

  logging.info('Indexing documents')
  for i in range(len(docs)):
    client.index(index=index_name, doc_type="type", id=str(i), body=docs[i])
  logging.info('Flushing index')
  client.indices.flush(index=index_name)

  logging.info('Running basic checks')
  count = client.count(index=index_name)['count']
  assert count == len(docs), "expected %d docs, got %d" % (len(docs), count)

def main():
  docs = [
    {
      "foo": "abc"
    },
    {
      "foo": "abcdef"
    },
    {
      "foo": "a"
    }
  ]

  murmur3_mapping = {
    'properties': {
      'foo': {
        'type': 'string',
        'fields': {
          'hash': {
            'type': 'murmur3'
          }
        }
      }
    }
  }

  create_index("mapper-murmur3", murmur3_mapping, docs)

  size_mapping = {
    '_size': {
      'enabled': True
    }
  }

  create_index("mapper-size", size_mapping, docs)

if __name__ == '__main__':
  main()
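The Java upgrade tests below rely on a specific layout inside the generated zip: a data/<index_name>/nodes/0/indices tree. A small sanity check one could run against a generated fixture (sketch only; the helper name and hard-coded path are illustrative):

import zipfile

def check_fixture(zip_path, index_name):
  # Assert the zip contains the tree the Java tests unzip and move into place.
  with zipfile.ZipFile(zip_path) as z:
    prefix = 'data/%s/nodes/0/indices' % index_name
    assert any(n.startswith(prefix) for n in z.namelist()), \
        'expected %s inside %s' % (prefix, zip_path)

check_fixture('plugins/mapper-murmur3/src/test/resources/indices/bwc/index-mapper-murmur3-2.0.0.zip',
              'index-mapper-murmur3-2.0.0')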
@@ -0,0 +1,77 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.murmur3;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.plugin.mapper.MapperMurmur3Plugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.aggregations.AggregationBuilders;
import org.elasticsearch.search.aggregations.metrics.cardinality.Cardinality;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
public class Murmur3FieldMapperUpgradeTests extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Collections.singleton(MapperMurmur3Plugin.class);
    }

    public void testUpgradeOldMapping() throws IOException {
        final String indexName = "index-mapper-murmur3-2.0.0";
        Path unzipDir = createTempDir();
        Path unzipDataDir = unzipDir.resolve("data");
        Path backwardsIndex = getBwcIndicesPath().resolve(indexName + ".zip");
        try (InputStream stream = Files.newInputStream(backwardsIndex)) {
            TestUtil.unzip(stream, unzipDir);
        }
        assertTrue(Files.exists(unzipDataDir));

        final String node = internalCluster().startNode();
        Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, node).nodeDataPaths();
        assertEquals(1, nodePaths.length);
        Path dataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
        assertFalse(Files.exists(dataPath));
        Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices");
        Files.move(src, dataPath);

        ensureYellow();
        final SearchResponse countResponse = client().prepareSearch(indexName).setSize(0).get();
        ElasticsearchAssertions.assertHitCount(countResponse, 3L);

        final SearchResponse cardinalityResponse = client().prepareSearch(indexName).addAggregation(
                AggregationBuilders.cardinality("card").field("foo.hash")).get();
        Cardinality cardinality = cardinalityResponse.getAggregations().get("card");
        assertEquals(3L, cardinality.getValue());
    }

}
Binary file not shown.
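For readers more familiar with the REST API, a rough Python-client equivalent of what Murmur3FieldMapperUpgradeTests asserts, assuming a hypothetical elasticsearch-py client `client` pointed at the upgraded node (sketch, not part of the commit):

resp = client.search(index='index-mapper-murmur3-2.0.0', body={
  'size': 0,
  'aggs': {'card': {'cardinality': {'field': 'foo.hash'}}}
})
assert resp['hits']['total'] == 3                   # all three docs survived the upgrade
assert resp['aggregations']['card']['value'] == 3   # murmur3 hashes are still aggregatable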
@@ -0,0 +1,88 @@
/*
 * Licensed to Elasticsearch under one or more contributor
 * license agreements. See the NOTICE file distributed with
 * this work for additional information regarding copyright
 * ownership. Elasticsearch licenses this file to you under
 * the Apache License, Version 2.0 (the "License"); you may
 * not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied. See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.elasticsearch.index.mapper.size;

import org.apache.lucene.util.LuceneTestCase;
import org.apache.lucene.util.TestUtil;
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.env.NodeEnvironment;
import org.elasticsearch.plugin.mapper.MapperSizePlugin;
import org.elasticsearch.plugins.Plugin;
import org.elasticsearch.search.SearchHit;
import org.elasticsearch.search.SearchHitField;
import org.elasticsearch.test.ESIntegTestCase;
import org.elasticsearch.test.hamcrest.ElasticsearchAssertions;

import java.io.IOException;
import java.io.InputStream;
import java.nio.file.Files;
import java.nio.file.Path;
import java.util.Collection;
import java.util.Collections;
import java.util.Map;

@ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.TEST, numDataNodes = 0)
@LuceneTestCase.SuppressFileSystems("ExtrasFS")
public class SizeFieldMapperUpgradeTests extends ESIntegTestCase {

    @Override
    protected Collection<Class<? extends Plugin>> nodePlugins() {
        return Collections.singleton(MapperSizePlugin.class);
    }

    public void testUpgradeOldMapping() throws IOException {
        final String indexName = "index-mapper-size-2.0.0";
        Path unzipDir = createTempDir();
        Path unzipDataDir = unzipDir.resolve("data");
        Path backwardsIndex = getBwcIndicesPath().resolve(indexName + ".zip");
        try (InputStream stream = Files.newInputStream(backwardsIndex)) {
            TestUtil.unzip(stream, unzipDir);
        }
        assertTrue(Files.exists(unzipDataDir));

        final String node = internalCluster().startNode();
        Path[] nodePaths = internalCluster().getInstance(NodeEnvironment.class, node).nodeDataPaths();
        assertEquals(1, nodePaths.length);
        Path dataPath = nodePaths[0].resolve(NodeEnvironment.INDICES_FOLDER);
        assertFalse(Files.exists(dataPath));
        Path src = unzipDataDir.resolve(indexName + "/nodes/0/indices");
        Files.move(src, dataPath);

        ensureYellow();
        final SearchResponse countResponse = client().prepareSearch(indexName).setSize(0).get();
        ElasticsearchAssertions.assertHitCount(countResponse, 3L);

        final SearchResponse sizeResponse = client().prepareSearch(indexName)
                .addField("_source")
                .addField("_size")
                .get();
        ElasticsearchAssertions.assertHitCount(sizeResponse, 3L);
        for (SearchHit hit : sizeResponse.getHits().getHits()) {
            String source = hit.getSourceAsString();
            assertNotNull(source);
            Map<String, SearchHitField> fields = hit.getFields();
            assertTrue(fields.containsKey("_size"));
            Number size = fields.get("_size").getValue();
            assertNotNull(size);
            assertEquals(source.length(), size.longValue());
        }
    }

}
Binary file not shown.
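Similarly, a rough Python-client equivalent of the _size assertions in SizeFieldMapperUpgradeTests, again assuming a hypothetical elasticsearch-py client `client` (sketch, not part of the commit):

resp = client.search(index='index-mapper-size-2.0.0',
                     body={'fields': ['_source', '_size']})
assert resp['hits']['total'] == 3
for hit in resp['hits']['hits']:
  assert hit.get('_source') is not None   # source is still readable
  assert hit['fields']['_size'][0] > 0    # the plugin-indexed _size is still stored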