Merge pull request #16655 from rjernst/dev-tools-cleanup

Build: Remove legacy testing and releasing scripts.
This commit is contained in:
Ryan Ernst 2016-02-22 13:43:45 -08:00
commit 7a4d23aeb2
5 changed files with 0 additions and 956 deletions

View File

@ -1,481 +0,0 @@
#!/usr/bin/env ruby
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License
#
# NAME
# build_randomization.rb -- Generate property file for the JDK randomization test
#
# SYNOPSIS
# build_randomization.rb [-d] [-l|t]
#
# DESCRIPTION
# This script takes the randomization choices described in RANDOM_CHOICES and generates the appropriate JAVA property file 'prop.txt'
# This property file also contains the appropriate JDK selection, randomized. JDK randomization is based on what is available on the Jenkins tools
# directory. This script is used by Jenkins test system to conduct Elasticsearch server randomization testing.
#
# In hash RANDOM_CHOICES, the key of randomization hash maps to key of java property. The value of the hash describes the possible value of the randomization
#
# For example RANDOM_CHOICES = { 'es.node.mode' => {:choices => ['local', 'network'], :method => :get_random_one} } means
# es.node.mode will be set to either 'local' or 'network', each with 50% probability
#
# OPTIONS SUMMARY
# The options are as follows:
#
# -d, --debug Increase logging verbosity for debugging purpose
# -t, --test Run in test mode. The script will execute unit tests.
# -l, --local Run in local mode. In this mode, directory structure will be created under current directory to mimic
# Jenkins' server directory layout. This mode is mainly used for development.
require 'enumerator'
require 'getoptlong'
require 'log4r'
require 'optparse'
require 'rubygems'
require 'yaml'
include Log4r
# Randomization matrix. Each key is the name of a Java system property that
# ends up in the generated property file; the value describes how its setting
# is chosen:
#   :choices    -> candidate values handed to Randomizer
#   :method     -> Randomizer method used to pick ('get_random_one' is a
#                  uniform pick; 'get_NN_percent' picks the first choice NN%
#                  of the time via Randomizer#method_missing)
#   :selections -> a fixed value, passed through without randomization
RANDOM_CHOICES = {
  'tests.jvm.argline' => [
    {:choices => ['-server'], :method => 'get_random_one'},
    {:choices => ['-XX:+UseConcMarkSweepGC', '-XX:+UseParallelGC', '-XX:+UseSerialGC', '-XX:+UseG1GC'], :method => 'get_random_one'},
    {:choices => ['-XX:+UseCompressedOops', '-XX:-UseCompressedOops'], :method => 'get_random_one'},
    {:choices => ['-XX:+AggressiveOpts'], :method => 'get_50_percent'}
  ],

  'es.node.mode' => {:choices => ['local', 'network'], :method => 'get_random_one'},

  # bug forced to be false for now :test_nightly => { :method => :true_or_false},
  'tests.nightly' => {:selections => false},
  'tests.heap.size' => {:choices => [512, 1024], :method => :random_heap},
  'tests.assertion.disabled'=> {:choices => 'org.elasticsearch', :method => 'get_10_percent'},
  'tests.security.manager' => {:choices => [true, false], :method => 'get_90_percent'},
}

# Global logger (log4r); the -d flag below raises verbosity to DEBUG.
L = Logger.new 'test_randomizer'
L.outputters = Outputter.stdout
L.level = INFO

# Runtime mode flags, toggled by the command line options.
C = {:local => false, :test => false}

OptionParser.new do |opts|
  opts.banner = "Usage: build_randomization.rb [options]"

  opts.on("-d", "--debug", "Debug mode") do |d|
    L.level = DEBUG
  end

  opts.on("-l", "--local", "Run in local mode") do |l|
    C[:local] = true
  end

  opts.on("-t", "--test", "Run unit tests") do |t|
    C[:test] = true
  end
end.parse!
# Picks values from a candidate list using several random strategies.
#
# Besides the explicit methods, any call matching get_<NN>_percent is trapped
# by method_missing and returns the first choice NN% of the time.
class Randomizer
  attr_accessor :data_array

  # data_array - the candidate values to pick from. Shape depends on the
  # strategy: an array of choices, or [min, max] for random_heap.
  def initialize(data_array)
    @data_array = data_array
  end

  # Fair coin flip.
  def true_or_false
    [true, false][rand(2)]
  end

  # Returns a heap-size string such as "512m": one of the two bounds or a
  # uniformly random size between them.
  def random_heap
    inner_data_array = [data_array[0], data_array[1], data_array[0] + rand(data_array[1] - data_array[0])]
    "%sm" % inner_data_array[rand(inner_data_array.size)]
  end

  # Picks mdata_array[i] with probability distribution[i] / distribution sum.
  def get_random_with_distribution(mdata_array, distribution)
    L.debug "randomized distribution data %s" % YAML.dump(mdata_array)
    L.debug "randomized distribution distribution %s" % YAML.dump(distribution)

    carry = 0
    # Accumulate the weights into cumulative thresholds, e.g. [90, 10]
    # becomes [{0=>90}, {1=>100}].
    distribution_map = distribution.enum_for(:each_with_index).map { |x, i| pre_carry = carry; carry += x; {i => x + pre_carry} }

    random_size = distribution_map.last.values.first
    selection = rand(random_size)

    # get the index that the randomized choice mapped to
    choice = distribution_map.select do |x|
      x.values.first > selection # only keep the index with distribution value higher than the random number
    end.first.keys.first # first hash's first key is the index we want

    L.debug("randomized distribution choice %s" % mdata_array[choice])
    mdata_array[choice]
  end

  # Uniform pick from data_array.
  def get_random_one
    data_array[rand(data_array.size)]
  end

  # Traps get_<NN>_percent calls: returns the first element NN% of the time
  # and the second element (nil when data_array is a scalar) otherwise.
  def method_missing(meth, *args, &block)
    if meth.to_s =~ /^get_(\d+)_percent/
      percentage = $1.to_i
      remain = 100 - percentage
      normalized_data = if !data_array.kind_of?(Array)
                          [data_array, nil]
                        else
                          data_array
                        end
      get_random_with_distribution(normalized_data, [percentage, remain])
    else
      super
    end
  end

  # Keep respond_to? consistent with the dynamic get_<NN>_percent methods
  # handled in method_missing above (standard companion override).
  def respond_to_missing?(meth, include_private = false)
    meth.to_s =~ /^get_(\d+)_percent/ ? true : super
  end
end
# Discovers JDK installations under the Jenkins automatic-install directory
# and offers a randomized pick among the Java 7+ ones.
class JDKSelector
  attr_reader :directory, :jdk_list

  def initialize(directory)
    @directory = directory
  end

  # Collect every entry that looks like a JDK install (name begins with 'J')
  # and remember it as a full path. Returns self for chaining.
  def get_jdk
    entries = Dir.entries(directory)
    jdk_names = entries.select { |entry| entry.start_with?('J') }
    @jdk_list = jdk_names.map { |name| File.join(directory, name) }
    self
  end

  # Drop any install whose trailing version number is 6 or lower.
  def filter_java_6(files)
    files.reject { |path| File.basename(path).split(/[^0-9]/)[-1].to_i <= 6 }
  end

  # Randomly pick one JDK from the given array (or the discovered list),
  # after filtering out Java 6 and below.
  def select_one(selection_array = nil)
    candidates = filter_java_6(selection_array || @jdk_list)
    Randomizer.new(candidates).get_random_one
  end

  # Build the environment hash (PATH prefix + JAVA_HOME) for a chosen JDK.
  def self.generate_jdk_hash(jdk_choice)
    separator = Gem.win_platform? ? File::ALT_SEPARATOR : File::SEPARATOR
    {
      :PATH => [jdk_choice, 'bin'].join(separator) + File::PATH_SEPARATOR + ENV['PATH'],
      :JAVA_HOME => jdk_choice
    }
  end
end
#
# JDK selector for explicitly supplied JDK locations (no directory scan).
#
class FixedJDKSelector < JDKSelector
  def initialize(directory)
    # accept a single path or a list of paths to choose between
    @directory = [*directory]
  end

  # The fixed list IS the selection; nothing to discover.
  def get_jdk
    @jdk_list = @directory
    self
  end

  # Pick one entry at random, without the Java 6 filtering applied by the
  # parent class (the caller supplied these paths deliberately).
  def select_one(selection_array = nil)
    candidates = selection_array || @jdk_list
    Randomizer.new(candidates).get_random_one
  end
end
#
# Property file writer
#
class PropertyWriter
  attr_reader :working_directory

  # mworking_directory - directory the property file is written into
  def initialize(mworking_directory)
    @working_directory = mworking_directory
  end

  # Write the given hash as sorted "key=value" lines into
  # <working_directory>/<BUILD_ID + BUILD_NUMBER>.txt, falling back to
  # prop.txt when the Jenkins build variables are not both present.
  def generate_property_file(data)
    directory = working_directory

    # array transformation: {k => v} becomes ["k=v", ...], sorted for stable output
    content = data.to_a.map do |x|
      x.join('=')
    end.sort

    build_id = ENV['BUILD_ID']
    build_number = ENV['BUILD_NUMBER']
    # Previously written as `(a + b) || 'prop' rescue 'prop'`; the || branch
    # was dead code (String#+ never returns nil) and the inline rescue hid
    # the real condition. An explicit nil check keeps the same fallback.
    file_name = (build_id && build_number) ? build_id + build_number : 'prop'
    file_name = file_name.split(File::SEPARATOR).first + '.txt'
    L.debug "Property file name is %s" % file_name

    File.open(File.join(directory, file_name), 'w') do |file|
      file.write(content.join("\n"))
    end
  end
end
#
# Execute randomization logics
#
class RandomizedRunner
  attr_reader :random_choices, :jdk, :p_writer

  # mrandom_choices - RANDOM_CHOICES-style configuration hash
  # mjdk            - path of the JDK chosen for this run
  # mwriter         - PropertyWriter used to persist the generated properties
  def initialize(mrandom_choices, mjdk, mwriter)
    @random_choices = mrandom_choices
    @jdk = mjdk
    @p_writer = mwriter
  end

  # Resolve every configuration entry that declares a :method by invoking
  # that method on a Randomizer built from its :choices, then collect the
  # results into a { property_name => [selections] } hash.
  # NOTE(review): this mutates the configuration hashes in place (stores
  # :selections back into them) during the first pass.
  def generate_selections
    configuration = random_choices

    L.debug "Enter %s" % __method__
    L.debug "Configuration %s" % YAML.dump(configuration)

    generated = {}
    # first pass: run the randomization method for each entry
    configuration.each do |k, v|
      if(v.kind_of?(Hash))
        if(v.has_key?(:method))
          randomizer = Randomizer.new(v[:choices])
          v[:selections] = randomizer.__send__(v[:method])
        end
      else
        # an array entry holds several independent sub-choices (e.g. JVM args)
        v.each do |x|
          if(x.has_key?(:method))
            randomizer = Randomizer.new(x[:choices])
            x[:selections] = randomizer.__send__(x[:method])
          end
        end
      end
    end.each do |k, v|
      # second pass: flatten the stored :selections into generated[k]
      if(v.kind_of?(Array))
        selections = v.inject([]) do |sum, current_hash|
          sum.push(current_hash[:selections])
        end
      else
        selections = [v[:selections]] unless v[:selections].nil?
      end
      # entries that produced nothing (nil selection) are dropped entirely
      generated[k] = selections unless (selections.nil? || selections.size == 0)
    end

    L.debug "Generated selections %s" % YAML.dump(generated)
    generated
  end

  # Merge the JDK environment hash, the flattened selections and a human
  # readable BUILD_DESC line into the final property hash.
  def get_env_matrix(jdk_selection, selections)
    L.debug "Enter %s" % __method__

    #normalization: multi-valued selections become a space-joined string
    s = {}
    selections.each do |k, v|
      if(v.size > 1)
        s[k] = v.compact.join(' ') #this should be dependent on class of v[0] and perform reduce operation instead... good enough for now
      else
        s[k] = v.first
      end
    end

    j = JDKSelector.generate_jdk_hash(jdk_selection)

    # create build description line
    desc = {}

    # TODO: better error handling
    desc[:BUILD_DESC] = "%s,%s,heap[%s],%s%s%s%s" % [
      File.basename(j[:JAVA_HOME]),
      s['es.node.mode'],
      s['tests.heap.size'],
      s['tests.nightly'] ? 'nightly,':'',
      s['tests.jvm.argline'].gsub(/-XX:/,''),
      s.has_key?('tests.assertion.disabled')? ',assert off' : '',
      s['tests.security.manager'] ? ',sec manager on' : ''
    ]
    result = j.merge(s).merge(desc)
    L.debug(YAML.dump(result))
    result
  end

  # Generate everything and write the property file.
  def run!
    p_writer.generate_property_file(get_env_matrix(jdk, generate_selections))
  end
end
#
# Main
#
unless(C[:test])
  # Check to see if this is running locally
  unless(C[:local])
    L.debug("Normal Mode")
    # Jenkins sets WORKSPACE; fall back to a writable scratch directory
    working_directory = ENV.fetch('WORKSPACE', (Gem.win_platform? ? Dir.pwd : '/var/tmp'))
  else
    L.debug("Local Mode")
    require 'fileutils' # FileUtils is used below but was never required
    test_directory = 'tools/hudson.model.JDK/'
    unless(File.exist?(test_directory))
      L.info "running local mode, setting up running environment"
      L.info "properties are written to file prop.txt"
      FileUtils.mkpath "%sJDK6" % test_directory
      FileUtils.mkpath "%sJDK7" % test_directory
    end
    working_directory = Dir.pwd
  end

  # script support both window and linux
  # TODO: refactor into platform/machine dependent class structure
  jdk = if(Gem.win_platform?)
    #window mode jdk directories are fixed
    #TODO: better logic
    L.debug("Window Mode")
    if(File.directory?('y:\jdk7\7u55')) #old window system under ec2
      FixedJDKSelector.new('y:\jdk7\7u55')
    else #new metal window system
      FixedJDKSelector.new(['c:\PROGRA~1\JAVA\jdk1.8.0_05', 'c:\PROGRA~1\JAVA\jdk1.7.0_55'])
    end
  else
    #Jenkins sets pwd prior to execution
    L.debug("Linux Mode")
    JDKSelector.new(File.join(ENV['PWD'],'tools','hudson.model.JDK'))
  end

  # randomize the selections and write them out as a property file
  runner = RandomizedRunner.new(RANDOM_CHOICES,
                                jdk.get_jdk.select_one,
                                PropertyWriter.new(working_directory))
  runner.run! # the previous `environment_matrix =` assignment was never read
  exit 0
else
  require "test/unit"
end
#
# Test
#
class TestJDKSelector < Test::Unit::TestCase
  L = Logger.new 'test'
  L.outputters = Outputter.stdout
  L.level = DEBUG

  # generate_jdk_hash must surface the chosen JDK in both PATH and JAVA_HOME
  def test_hash_generator
    chosen = '/dummy/jdk7'
    generated = JDKSelector.generate_jdk_hash(chosen)
    L.debug "Generated %s" % generated

    assert generated[:PATH].include?(chosen), "PATH doesn't included choice"
    assert generated[:JAVA_HOME].include?(chosen), "JAVA home doesn't include choice"
  end
end
class TestFixJDKSelector < Test::Unit::TestCase
  L = Logger.new 'test'
  L.outputters = Outputter.stdout
  L.level = DEBUG

  # the constructor must normalize scalars and arrays alike into an array
  def test_initialize
    fixtures = ['/home/dummy', ['/JDK7', '/home2'], ['home/dummy']]
    fixtures.each do |fixture|
      selector = FixedJDKSelector.new(fixture)
      assert_kind_of Array, selector.directory
      assert_equal [*fixture], selector.directory
    end
  end

  # select_one must return one of the fixed entries
  def test_select_one
    candidates = %w(one two three)
    selector = FixedJDKSelector.new(candidates)
    assert candidates.include?(selector.get_jdk.select_one)
  end

  # inherited hash generation must surface the choice in PATH and JAVA_HOME
  def test_hash_generator
    chosen = '/dummy/jdk7'
    generated = FixedJDKSelector.generate_jdk_hash(chosen)
    L.debug "Generated %s" % generated

    assert generated[:PATH].include?(chosen), "PATH doesn't included choice"
    assert generated[:JAVA_HOME].include?(chosen), "JAVA home doesn't include choice"
  end
end
class TestPropertyWriter < Test::Unit::TestCase
  L = Logger.new 'test'
  L.outputters = Outputter.stdout
  L.level = DEBUG

  # working_directory is stored verbatim
  def test_initialize
    ['/home/dummy','/tmp'].each do |x|
      test_object = PropertyWriter.new(x)
      assert_kind_of String, test_object.working_directory
      assert_equal x, test_object.working_directory
    end
  end

  # With BUILD_ID/BUILD_NUMBER unset the writer falls back to prop.txt;
  # content must be one sorted key=value line per hash entry.
  # NOTE(review): assumes /tmp is writable and BUILD_ID/BUILD_NUMBER are not
  # both set in the environment.
  def test_generate_property
    test_file = '/tmp/prop.txt'
    File.delete(test_file) if File.exist?(test_file)

    test_object = PropertyWriter.new(File.dirname(test_file))
    # default prop.txt
    test_object.generate_property_file({:hi => 'there'})
    assert(File.exist?(test_file))

    File.open(test_file, 'r') do |properties_file|
      properties_file.read.each_line do |line|
        line.strip!
        assert_equal 'hi=there', line, "content %s is not hi=there" % line
      end
    end
    File.delete(test_file) if File.exist?(test_file)
  end
end
# PropertyWriter stand-in for tests: logs what would be written, touches no files.
class DummyPropertyWriter < PropertyWriter
  def generate_property_file(payload)
    L.debug "generating property file for %s" % YAML.dump(payload)
    L.debug "on directory %s" % working_directory
  end
end
class TestRandomizedRunner < Test::Unit::TestCase
  # the constructor stores its three collaborators untouched
  def test_initialize
    test_object = RandomizedRunner.new(RANDOM_CHOICES, '/tmp/dummy/jdk', po = PropertyWriter.new('/tmp'))
    assert_equal RANDOM_CHOICES, test_object.random_choices
    assert_equal '/tmp/dummy/jdk', test_object.jdk
    assert_equal po, test_object.p_writer
  end

  # entries without a :method pass their :selections value straight through
  def test_generate_selection_no_method
    test_object = RandomizedRunner.new({'tests.one' => {:selections => false }}, '/tmp/dummy/jdk', po = DummyPropertyWriter.new('/tmp'))
    selection = test_object.generate_selections
    assert_equal false, selection['tests.one'].first, 'randomization without selection method fails'
  end

  # entries with a :method must pick one of the declared :choices
  def test_generate_with_method
    test_object = RandomizedRunner.new({'es.node.mode' => {:choices => ['local', 'network'], :method => 'get_random_one'}},
                                       '/tmp/dummy/jdk', po = DummyPropertyWriter.new('/tmp'))
    selection = test_object.generate_selections
    assert ['local', 'network'].include?(selection['es.node.mode'].first), 'selection choice is not correct'
  end

  # the env matrix must carry the chosen JDK through to JAVA_HOME
  def test_get_env_matrix
    test_object = RandomizedRunner.new(RANDOM_CHOICES,
                                       '/tmp/dummy/jdk', po = DummyPropertyWriter.new('/tmp'))
    selection = test_object.generate_selections
    env_matrix = test_object.get_env_matrix('/tmp/dummy/jdk', selection)
    puts YAML.dump(env_matrix)
    assert_equal '/tmp/dummy/jdk', env_matrix[:JAVA_HOME]
  end
end

View File

@ -1,24 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
#
# This is used for client testing to pull in master, 090 bits
#
URL_MASTER=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/master/nightly/JDK7/elasticsearch-latest-SNAPSHOT.zip
URL_1x=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/1.x/nightly/JDK7/elasticsearch-latest-SNAPSHOT.zip
URL_11=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/1.1/nightly/JDK6/elasticsearch-latest-SNAPSHOT.zip
URL_10=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/1.0/nightly/JDK6/elasticsearch-latest-SNAPSHOT.zip
URL_090=http://s3-us-west-2.amazonaws.com/build.elasticsearch.org/origin/0.90/nightly/JDK6/elasticsearch-latest-SNAPSHOT.zip

View File

@ -1,65 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
try:
    import boto.s3
except ImportError:
    # Trap only a missing module; the previous bare `except:` would also
    # swallow KeyboardInterrupt/SystemExit.
    raise RuntimeError("""
S3 download requires boto to be installed
Use one of:
'pip install -U boto'
'apt-get install python-boto'
'easy_install boto'
""")
import boto.s3
def list_buckets(conn):
    """Return every S3 bucket visible on the given boto connection."""
    all_buckets = conn.get_all_buckets()
    return all_buckets
def download_s3(conn, path, key, file, bucket):
    # Fetch s3://<bucket>/<path>/<key> into the local path `file`, printing a
    # dot per boto progress callback. (Python 2 print statement.)
    print 'Downloading %s from Amazon S3 bucket %s/%s' % \
        (file, bucket, os.path.join(path, key))

    def percent_cb(complete, total):
        sys.stdout.write('.')
        sys.stdout.flush()

    bucket = conn.get_bucket(bucket)
    k = bucket.get_key(os.path.join(path, key))
    k.get_contents_to_filename(file, cb=percent_cb, num_cb=100)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Downloads a bucket from Amazon S3')
    parser.add_argument('--file', '-f', metavar='path to file',
                        help='path to store the bucket to', required=True)
    parser.add_argument('--bucket', '-b', default='downloads.elasticsearch.org',
                        help='The S3 Bucket to download from')
    parser.add_argument('--path', '-p', default='',
                        help='The key path to use')
    parser.add_argument('--key', '-k', default=None,
                        help='The key - uses the file name as default key')
    args = parser.parse_args()

    # default the S3 key to the local file's basename
    if args.key:
        key = args.key
    else:
        key = os.path.basename(args.file)

    # credentials come from the standard boto environment/config lookup
    connection = boto.connect_s3()
    download_s3(connection, args.path, key, args.file, args.bucket);

View File

@ -1,319 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import random
import os
import tempfile
import shutil
import subprocess
import time
import argparse
import logging
import sys
import re
from datetime import datetime
try:
from elasticsearch import Elasticsearch
from elasticsearch.exceptions import ConnectionError
from elasticsearch.exceptions import TransportError
except ImportError as e:
print('Can\'t import elasticsearch please install `sudo pip install elasticsearch`')
raise e
'''This file executes a basic upgrade test by running a full cluster restart.
The upgrade test starts 2 or more nodes of an old elasticsearch version, indexes
a random number of documents into the running nodes and executes a full cluster restart.
After the nodes are recovered a small set of basic checks are executed to ensure all
documents are still searchable and field data can be loaded etc.
NOTE: This script requires the elasticsearch python client `elasticsearch-py` run the following command to install:
`sudo pip install elasticsearch`
if you are running python3 you need to install the client using pip3. On OSX `pip3` will be included in the Python 3.4
release available on `https://www.python.org/download/`:
`sudo pip3 install elasticsearch`
See `https://github.com/elasticsearch/elasticsearch-py` for details
In order to run this test two different version of elasticsearch are required. Both need to be unpacked into
the same directory:
```
$ cd /path/to/elasticsearch/clone
$ mkdir backwards && cd backwards
$ wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-1.3.1.tar.gz
$ wget https://download.elasticsearch.org/elasticsearch/elasticsearch/elasticsearch-0.90.13.tar.gz
$ tar -zxvf elasticsearch-1.3.1.tar.gz && tar -zxvf elasticsearch-0.90.13.tar.gz
$ cd ..
$ python dev-tools/upgrade-tests.py --version.backwards 0.90.13 --version.current 1.3.1
```
'''
# Versions that must never be used as an upgrade source or target; each entry
# records why and links the tracking issue.
BLACK_LIST = {'1.2.0' : { 'reason': 'Contains a major bug where routing hashes are not consistent with previous version',
                          'issue': 'https://github.com/elasticsearch/elasticsearch/pull/6393'},
              '1.3.0' : { 'reason': 'Lucene Related bug prevents upgrades from 0.90.7 and some earlier versions ',
                          'issue' : 'https://github.com/elasticsearch/elasticsearch/pull/7055'}}
# sometimes returns True
def rarely():
return random.randint(0, 10) == 0
# usually returns True
def frequently():
    # complement of rarely(): True about ten times out of eleven
    return not rarely()
# asserts the correctness of the given hits given they are sorted asc
def assert_sort(hits):
    """Assert that the search response has a non-empty hit list whose 'sort'
    values are in non-decreasing order.

    hits -- an Elasticsearch search response dict ({'hits': {'hits': [...]}})
    Raises AssertionError on an empty result or an out-of-order pair.
    """
    values = [hit['sort'] for hit in hits['hits']['hits']]
    assert len(values) > 0, 'expected non empty result'  # fixed 'emtpy' typo
    val = min(values)
    for x in values:
        # each value must be >= its predecessor (val trails one step behind)
        assert x >= val, '%s >= %s' % (x, val)
        val = x
# asserts that the cluster health didn't timeout etc.
def assert_health(cluster_health, num_shards, num_replicas):
    # NOTE(review): num_shards/num_replicas are currently unused; kept for
    # call-site compatibility
    assert cluster_health['timed_out'] == False, 'cluster health timed out %s' % cluster_health
# Starts a new elasticsearch node from a released & untared version.
# This node uses unicast discovery with the provided unicast host list and starts
# the nodes with the given data directory. This allows shutting down and starting up
# nodes on the same data dir simulating a full cluster restart.
def start_node(version, data_dir, node_dir, unicast_host_list, tcp_port, http_port):
    es_run_path = os.path.join(node_dir, 'elasticsearch-%s' % (version), 'bin/elasticsearch')
    if version.startswith('0.90.'):
        foreground = '-f' # 0.90.x starts in background automatically
    else:
        # NOTE(review): this appends an empty-string argv entry for 1.x
        # launches; presumably harmless to the launcher script -- confirm
        foreground = ''
    # stdout/stderr are piped so node output doesn't pollute the test log
    return subprocess.Popen([es_run_path,
                             '-Des.path.data=%s' % data_dir, '-Des.cluster.name=upgrade_test',
                             '-Des.discovery.zen.ping.unicast.hosts=%s' % unicast_host_list,
                             '-Des.transport.tcp.port=%s' % tcp_port,
                             '-Des.http.port=%s' % http_port,
                             foreground], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
# Indexes the given number of document into the given index
# and randomly runs refresh, optimize and flush commands
def index_documents(es, index_name, type, num_docs):
    logging.info('Indexing %s docs' % num_docs)
    for id in range(0, num_docs):
        es.index(index=index_name, doc_type=type, id=id, body={'string': str(random.randint(0, 100)),
                                                               'long_sort': random.randint(0, 100),
                                                               'double_sort' : float(random.randint(0, 100))})
        # sprinkle maintenance calls between index requests to vary segment state
        if rarely():
            es.indices.refresh(index=index_name)
        if rarely():
            es.indices.flush(index=index_name, force=frequently())
        if rarely():
            es.indices.optimize(index=index_name)
    # final refresh so every document is searchable before assertions run
    es.indices.refresh(index=index_name)
# Runs a basic number of assertions including:
# - document counts
# - match all search with sort on double / long
# - Realtime GET operations
# TODO(simonw): we should add stuff like:
# - dates including sorting
# - string sorting
# - docvalues if available
# - global ordinal if available
def run_basic_asserts(es, index_name, type, num_docs):
    # every indexed document must be accounted for
    count = es.count(index=index_name)['count']
    assert count == num_docs, 'Expected %r but got %r documents' % (num_docs, count)

    # realtime GET on randomly chosen ids
    for _ in range(0, num_docs):
        random_doc_id = random.randint(0, num_docs-1)
        doc = es.get(index=index_name, doc_type=type, id=random_doc_id)
        assert doc, 'Expected document for id %s but got %s' % (random_doc_id, doc)

    # sorted searches must come back in ascending order (see assert_sort)
    assert_sort(es.search(index=index_name,
                          body={
                              'sort': [
                                  {'double_sort': {'order': 'asc'}}
                              ]
                          }))

    assert_sort(es.search(index=index_name,
                          body={
                              'sort': [
                                  {'long_sort': {'order': 'asc'}}
                              ]
                          }))
# picks a random version or an entire random version tuple from the directory
# to run the backwards tests against.
def pick_random_upgrade_version(directory, lower_version=None, upper_version=None):
    """Pick the (lower, upper) version pair for the upgrade test.

    directory     -- folder containing 'elasticsearch-X.Y.Z' installs
    lower_version -- optional fixed upgrade source
    upper_version -- optional fixed upgrade target
    Returns a (bwc_version, current_version) string tuple. Blacklisted
    versions (see BLACK_LIST) are never picked automatically.
    """
    if lower_version and upper_version:
        return lower_version, upper_version
    assert os.path.isdir(directory), 'No such directory %s' % directory
    versions = []
    # collect every 'elasticsearch-X.Y.Z' install, skipping blacklisted versions
    for version in map(lambda x : x[len('elasticsearch-'):], filter(lambda x : re.match(r'^elasticsearch-\d+[.]\d+[.]\d+$', x), os.listdir(directory))):
        if version not in BLACK_LIST:
            versions.append(build_tuple(version))
    versions.sort()

    if lower_version: # lower version is set - picking a higher one
        # list(...) so len()/shuffle() work on Python 3, where filter is lazy
        versions = list(filter(lambda x : x > build_tuple(lower_version), versions))
        assert len(versions) >= 1, 'Expected at least 1 higher version than %s version in %s ' % (lower_version, directory)
        random.shuffle(versions)
        return lower_version, build_version(versions[0])
    if upper_version:
        versions = list(filter(lambda x : x < build_tuple(upper_version), versions))
        assert len(versions) >= 1, 'Expected at least 1 lower version than %s version in %s ' % (upper_version, directory)
        random.shuffle(versions)
        return build_version(versions[0]), upper_version
    # neither side fixed: pick two distinct versions at random
    assert len(versions) >= 2, 'Expected at least 2 different version in %s but found %s' % (directory, len(versions))
    random.shuffle(versions)
    versions = versions[0:2]
    versions.sort()
    return build_version(versions[0]), build_version(versions[1])
def build_version(version_tuple):
    """Render a [major, minor, patch] int sequence back into 'X.Y.Z'."""
    return '.'.join(map(str, version_tuple))
def build_tuple(version_string):
    """Parse 'X.Y.Z' into a list of ints suitable for ordered comparison."""
    return list(map(int, version_string.split('.')))
# returns a new elasticsearch client and ensures the all nodes have joined the cluster
# this method waits at most 30 seconds for all nodes to join
def new_es_instance(num_nodes, http_port, timeout = 30):
    logging.info('Waiting for %s nodes to join the cluster' % num_nodes)
    for _ in range(0, timeout):
        # TODO(simonw): ask Honza if there is a better way to do this?
        try:
            # one client endpoint per node: ports http_port .. http_port+num_nodes-1
            es = Elasticsearch([
                {'host': '127.0.0.1', 'port': http_port + x}
                for x in range(0, num_nodes)])
            es.cluster.health(wait_for_nodes=num_nodes)
            es.count() # can we actually search or do we get a 503? -- anyway retry
            return es
        except (ConnectionError, TransportError):
            pass
        # one-second back-off between connection attempts
        time.sleep(1)
    assert False, 'Timed out waiting for %s nodes for %s seconds' % (num_nodes, timeout)
def assert_versions(bwc_version, current_version, node_dir):
    """Validate the requested upgrade pair.

    Raises AssertionError when bwc_version >= current_version, when either
    version is blacklisted, or when the current version's install directory
    is missing under node_dir.
    """
    assert [int(x) for x in bwc_version.split('.')] < [int(x) for x in current_version.split('.')],\
        '[%s] must be < than [%s]' % (bwc_version, current_version)
    for version in [bwc_version, current_version]:
        assert version not in BLACK_LIST, 'Version %s is blacklisted - %s, see %s' \
            % (version, BLACK_LIST[version]['reason'],
               BLACK_LIST[version]['issue'])
    # NOTE(review): only the current version's install directory is verified
    # here; the BWC install is assumed present. The old message interpolated
    # the stale loop variable `version` and read 'does not exists'.
    install_dir = os.path.join(node_dir, 'elasticsearch-%s' % current_version)
    assert os.path.isdir(install_dir), 'Expected elasticsearch-%s install directory does not exist: %s' % (current_version, install_dir)
# Executes a full cluster restart upgrade: boot 2-3 nodes of bwc_version,
# index random documents, kill the cluster, restart it on current_version
# over the same data directory, and re-run the assertions.
# NOTE(review): reads the module-level `seed` global (set in __main__) for
# the log message only.
def full_cluster_restart(node_dir, current_version, bwc_version, tcp_port, http_port):
    assert_versions(bwc_version, current_version, node_dir)
    num_nodes = random.randint(2, 3)
    nodes = []
    data_dir = tempfile.mkdtemp()
    logging.info('Running upgrade test from [%s] to [%s] seed: [%s] es.path.data: [%s] es.http.port [%s] es.tcp.port [%s]'
                 % (bwc_version, current_version, seed, data_dir, http_port, tcp_port))
    try:
        logging.info('Starting %s BWC nodes of version %s' % (num_nodes, bwc_version))
        # each node gets consecutive tcp/http ports starting at the base ports
        unicast_addresses = ','.join(['127.0.0.1:%s' % (tcp_port+x) for x in range(0, num_nodes)])
        for id in range(0, num_nodes):
            nodes.append(start_node(bwc_version, data_dir, node_dir, unicast_addresses, tcp_port+id, http_port+id))
        es = new_es_instance(num_nodes, http_port)
        es.indices.delete(index='test_index', ignore=404)
        num_shards = random.randint(1, 10)
        num_replicas = random.randint(0, 1)
        logging.info('Create index with [%s] shards and [%s] replicas' % (num_shards, num_replicas))
        es.indices.create(index='test_index', body={
            # TODO(simonw): can we do more here in terms of randomization - seems hard due to all the different version
            'settings': {
                'number_of_shards': num_shards,
                'number_of_replicas': num_replicas
            }
        })
        logging.info('Nodes joined, waiting for green status')
        health = es.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
        assert_health(health, num_shards, num_replicas)
        num_docs = random.randint(10, 100)
        index_documents(es, 'test_index', 'test_type', num_docs)
        logging.info('Run basic asserts before full cluster restart')
        run_basic_asserts(es, 'test_index', 'test_type', num_docs)
        logging.info('kill bwc nodes -- prepare upgrade')
        for node in nodes:
            node.terminate()
        # now upgrade the nodes and rerun the checks
        tcp_port = tcp_port + len(nodes) # bump up port to make sure we can claim them
        http_port = http_port + len(nodes)
        logging.info('Full Cluster restart starts upgrading to version [elasticsearch-%s] es.http.port [%s] es.tcp.port [%s]'
                     % (current_version, http_port, tcp_port))
        nodes = []
        unicast_addresses = ','.join(['127.0.0.1:%s' % (tcp_port+x) for x in range(0, num_nodes)])
        for id in range(0, num_nodes+1): # one more to trigger relocation
            nodes.append(start_node(current_version, data_dir, node_dir, unicast_addresses, tcp_port+id, http_port+id))
        es = new_es_instance(num_nodes+1, http_port)
        logging.info('Nodes joined, waiting for green status')
        health = es.cluster.health(wait_for_status='green', wait_for_relocating_shards=0)
        assert_health(health, num_shards, num_replicas)
        run_basic_asserts(es, 'test_index', 'test_type', num_docs)
        # by running the indexing again we try to catch possible mapping problems after the upgrade
        index_documents(es, 'test_index', 'test_type', num_docs)
        run_basic_asserts(es, 'test_index', 'test_type', num_docs)
        logging.info("[SUCCESS] - all test passed upgrading from version [%s] to version [%s]" % (bwc_version, current_version))
    finally:
        # always tear nodes down and clean the scratch data directory
        for node in nodes:
            node.terminate()
        time.sleep(1) # wait a second until removing the data dirs to give the nodes a chance to shutdown
        shutil.rmtree(data_dir) # remove the temp data dir
if __name__ == '__main__':
    logging.basicConfig(format='[%(levelname)s] [%(asctime)s] %(message)s', level=logging.INFO,
                        datefmt='%Y-%m-%d %I:%M:%S %p')
    # quiet the chatty client/transport loggers
    logging.getLogger('elasticsearch').setLevel(logging.ERROR)
    logging.getLogger('urllib3').setLevel(logging.WARN)
    parser = argparse.ArgumentParser(description='Tests Full Cluster Restarts across major version')
    parser.add_argument('--version.backwards', '-b', dest='backwards_version', metavar='V',
                        help='The elasticsearch version to upgrade from')
    parser.add_argument('--version.current', '-c', dest='current_version', metavar='V',
                        help='The elasticsearch version to upgrade to')
    parser.add_argument('--seed', '-s', dest='seed', metavar='N', type=int,
                        help='The random seed to use')
    parser.add_argument('--backwards.dir', '-d', dest='bwc_directory', default='backwards', metavar='dir',
                        help='The directory to the backwards compatibility sources')
    parser.add_argument('--tcp.port', '-p', dest='tcp_port', default=9300, metavar='port', type=int,
                        help='The port to use as the minimum port for TCP communication')
    parser.add_argument('--http.port', '-t', dest='http_port', default=9200, metavar='port', type=int,
                        help='The port to use as the minimum port for HTTP communication')
    parser.set_defaults(bwc_directory='backwards')
    parser.set_defaults(seed=int(time.time())) # wall clock seed unless provided
    args = parser.parse_args()
    node_dir = args.bwc_directory
    current_version = args.current_version
    bwc_version = args.backwards_version
    seed = args.seed
    random.seed(seed)
    # fill in whichever side of the version pair was not given on the CLI
    bwc_version, current_version = pick_random_upgrade_version(node_dir, bwc_version, current_version)
    tcp_port = args.tcp_port
    http_port = args.http_port
    try:
        full_cluster_restart(node_dir, current_version, bwc_version, tcp_port, http_port)
    except:
        # bare except is deliberate here: log the reproduction line for ANY
        # failure, then re-raise
        logging.warn('REPRODUCE WITH: \n\t`python %s --version.backwards %s --version.current %s --seed %s --tcp.port %s --http.port %s`'
                     % (sys.argv[0], bwc_version, current_version, seed, tcp_port, http_port))
        raise

View File

@ -1,67 +0,0 @@
# Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
try:
    import boto.s3
except ImportError:
    # Trap only a missing module; the previous bare `except:` would also
    # swallow KeyboardInterrupt/SystemExit.
    raise RuntimeError("""
S3 upload requires boto to be installed
Use one of:
'pip install -U boto'
'apt-get install python-boto'
'easy_install boto'
""")
import boto.s3
def list_buckets(conn):
    """Return every S3 bucket visible on the given boto connection."""
    all_buckets = conn.get_all_buckets()
    return all_buckets
def upload_s3(conn, path, key, file, bucket):
    # Upload local `file` to s3://<bucket>/<path>/<key>, creating the bucket
    # if needed and printing a dot per boto progress callback.
    # (Python 2 print statement.)
    print 'Uploading %s to Amazon S3 bucket %s/%s' % \
        (file, bucket, os.path.join(path, key))

    def percent_cb(complete, total):
        sys.stdout.write('.')
        sys.stdout.flush()

    bucket = conn.create_bucket(bucket)
    k = bucket.new_key(os.path.join(path, key))
    k.set_contents_from_filename(file, cb=percent_cb, num_cb=100)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Uploads files to Amazon S3')
    # --file help text previously said 'the branch to release from' -- a
    # copy/paste error from the release script
    parser.add_argument('--file', '-f', metavar='path to file',
                        help='the file to upload', required=True)
    parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org',
                        help='The S3 Bucket to upload to')
    parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch',
                        help='The key path to use')
    parser.add_argument('--key', '-k', metavar='key', default=None,
                        help='The key - uses the file name as default key')
    args = parser.parse_args()

    # default the S3 key to the local file's basename
    if args.key:
        key = args.key
    else:
        key = os.path.basename(args.file)

    # credentials come from the standard boto environment/config lookup
    connection = boto.connect_s3()
    upload_s3(connection, args.path, key, args.file, args.bucket)