#!/usr/bin/env python3
##
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=invalid-name
# To disable 'invalid constant name' warnings.
# pylint: disable=import-error
# Testing environment may not have all dependencies.

"""
This script uses the Jenkins REST API to collect test results of the given build(s) and generates
flakiness data about unit tests.
Print help: report-flakies.py -h
"""

import argparse
import logging
import os
import time
from collections import OrderedDict
from jinja2 import Template

import requests

import findHangingTests

parser = argparse.ArgumentParser()
parser.add_argument(
    '--urls', metavar='URL', action='append', required=True,
    help='Urls to analyze, which can refer to simple projects, multi-configuration projects or '
         'individual build runs.')
parser.add_argument('--excluded-builds', metavar='n1,n2', action='append',
                    help='List of build numbers to exclude (or "None"). Not required, '
                         'but if specified, the number of uses should be the same as that of '
                         '--urls since the values are matched.')
parser.add_argument('--max-builds', metavar='n', action='append', type=int,
                    help='The maximum number of builds to use (if available on jenkins). Specify '
                         '0 to analyze all builds. Not required, but if specified, the number of '
                         'uses should be the same as that of --urls since the values are matched.')
parser.add_argument('--is-yetus', metavar='True/False', action='append', choices=['True', 'False'],
                    help='True, if the build is yetus style, i.e. look for maven output in the '
                         'artifacts; False, if the maven output is in <url>/consoleText itself.')
parser.add_argument(
    "--mvn", action="store_true",
    help="Writes two strings for including/excluding these flaky tests using maven flags. These "
         "strings are written to files so they can be saved as artifacts and easily imported in "
         "other projects. Also writes timeout and failing tests in separate files for "
         "reference.")
parser.add_argument("-o", "--output", metavar='dir', action='store', required=False,
                    help="the output directory")
parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
args = parser.parse_args()

logging.basicConfig()
logger = logging.getLogger(__name__)
if args.verbose:
    logger.setLevel(logging.INFO)

output_dir = '.'
if args.output is not None:
    output_dir = args.output
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)

def get_bad_tests(build_url, is_yetus):
    """
    Given the url of an executed build, analyzes its maven output, and returns
    [list of all tests, list of failed tests, list of timeout tests, list of hanging tests].
    Returns None if it can't get the maven output from the build or if there is any other error.
    """
    logger.info("Analyzing %s", build_url)
    needed_fields = "_class,building"
    if is_yetus:
        needed_fields += ",artifacts[fileName,relativePath]"
    response = requests.get(build_url + "/api/json?tree=" + needed_fields).json()
    if response["building"]:
        logger.info("Skipping this build since it is in progress.")
        return {}
    console_url = None
    if is_yetus:
        # Yetus-style builds archive the maven output as an artifact instead of the console log.
        for artifact in response["artifacts"]:
            if artifact["fileName"] == "patch-unit-root.txt":
                console_url = build_url + "/artifact/" + artifact["relativePath"]
                break
        if console_url is None:
            logger.info("Can't find 'patch-unit-root.txt' artifact for Yetus build %s. Ignoring "
                        "this build.", build_url)
            return
    else:
        console_url = build_url + "/consoleText"
    build_result = findHangingTests.get_bad_tests(console_url)
    if not build_result:
        logger.info("Ignoring build %s", build_url)
        return
    return build_result

def expand_multi_config_projects(cli_args):
    """
    If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
    get urls for individual jobs.
    """
    job_urls = cli_args.urls
    excluded_builds_arg = cli_args.excluded_builds
    max_builds_arg = cli_args.max_builds
    is_yetus_arg = cli_args.is_yetus
    if excluded_builds_arg is not None and len(excluded_builds_arg) != len(job_urls):
        raise Exception("Number of --excluded-builds arguments should be same as that of --urls "
                        "since values are matched.")
    if max_builds_arg is not None and len(max_builds_arg) != len(job_urls):
        raise Exception("Number of --max-builds arguments should be same as that of --urls "
                        "since values are matched.")
    final_expanded_urls = []
    for (i, job_url) in enumerate(job_urls):
        max_builds = 10000  # Some high number
        is_yetus = False
        if is_yetus_arg is not None:
            is_yetus = is_yetus_arg[i] == "True"
        if max_builds_arg is not None and max_builds_arg[i] != 0:
            max_builds = int(max_builds_arg[i])
        excluded_builds = []
        if excluded_builds_arg is not None and excluded_builds_arg[i] != "None":
            excluded_builds = [int(x) for x in excluded_builds_arg[i].split(",")]
        request = requests.get(job_url + "/api/json?tree=_class,activeConfigurations%5Burl%5D")
        if request.status_code != 200:
            raise Exception("Failed to get job information from jenkins for url '" + job_url +
                            "'. Jenkins returned HTTP status " + str(request.status_code))
        response = request.json()
        if "activeConfigurations" in response:
            for config in response["activeConfigurations"]:
                final_expanded_urls.append({'url': config["url"], 'max_builds': max_builds,
                                            'excludes': excluded_builds, 'is_yetus': is_yetus})
        else:
            final_expanded_urls.append({'url': job_url, 'max_builds': max_builds,
                                        'excludes': excluded_builds, 'is_yetus': is_yetus})
    return final_expanded_urls
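
# Each entry returned by expand_multi_config_projects() has this shape (values illustrative):
#   {'url': '<job or configuration url>', 'max_builds': 10000, 'excludes': [], 'is_yetus': False}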

# Set of timeout/failed tests across all given urls.
all_timeout_tests = set()
all_failed_tests = set()
all_hanging_tests = set()
# Contains { <url> : { <bad_test> : { 'all': [<build ids>], 'failed': [<build ids>],
#                                     'timeout': [<build ids>], 'hanging': [<build ids>] } } }
url_to_bad_test_results = OrderedDict()
# Contains { <url> : [run_ids] }
# Used for common min/max build ids when generating sparklines.
url_to_build_ids = OrderedDict()

# Iterates over each url, gets test results and prints flaky tests.
expanded_urls = expand_multi_config_projects(args)
for url_max_build in expanded_urls:
    url = url_max_build["url"]
    excludes = url_max_build["excludes"]
    json_response = requests.get(url + "/api/json?tree=id,builds%5Bnumber,url%5D").json()
    if "builds" in json_response:
        builds = json_response["builds"]
        logger.info("Analyzing job: %s", url)
    else:
        builds = [{'number': json_response["id"], 'url': url}]
        logger.info("Analyzing build: %s", url)
    build_id_to_results = {}
    num_builds = 0
    url_to_build_ids[url] = []
    build_ids_without_tests_run = []
    for build in builds:
        build_id = build["number"]
        if build_id in excludes:
            continue
        result = get_bad_tests(build["url"], url_max_build['is_yetus'])
        if not result:
            continue
        if len(result[0]) > 0:
            build_id_to_results[build_id] = result
        else:
            build_ids_without_tests_run.append(build_id)
        num_builds += 1
        url_to_build_ids[url].append(build_id)
        if num_builds == url_max_build["max_builds"]:
            break
    url_to_build_ids[url].sort()

    # Collect list of bad tests.
    bad_tests = set()
    for build in build_id_to_results:
        [_, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
        all_timeout_tests.update(timeout_tests)
        all_failed_tests.update(failed_tests)
        all_hanging_tests.update(hanging_tests)
        # Note that timed-out tests are already included in failed tests.
        bad_tests.update(failed_tests.union(hanging_tests))

    # For each bad test, get build ids where it ran, timed out, failed or hung.
    test_to_build_ids = {key: {'all': set(), 'timeout': set(), 'failed': set(),
                               'hanging': set(), 'bad_count': 0}
                         for key in bad_tests}
    for build in build_id_to_results:
        [all_tests, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
        for bad_test in test_to_build_ids:
            is_bad = False
            if all_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]["all"].add(build)
            if timeout_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]['timeout'].add(build)
                is_bad = True
            if failed_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]['failed'].add(build)
                is_bad = True
            if hanging_tests.issuperset([bad_test]):
                test_to_build_ids[bad_test]['hanging'].add(build)
                is_bad = True
            if is_bad:
                test_to_build_ids[bad_test]['bad_count'] += 1

    # Calculate flakyness % and successful builds for each test. Also sort build ids.
    for bad_test in test_to_build_ids:
        test_result = test_to_build_ids[bad_test]
        test_result['flakyness'] = test_result['bad_count'] * 100.0 / len(test_result['all'])
        test_result['success'] = (test_result['all'].difference(
            test_result['failed'].union(test_result['hanging'])))
        for key in ['all', 'timeout', 'failed', 'hanging', 'success']:
            test_result[key] = sorted(test_result[key])

    # Sort tests in descending order by flakyness.
    sorted_test_to_build_ids = OrderedDict(
        sorted(iter(test_to_build_ids.items()), key=lambda x: x[1]['flakyness'], reverse=True))
    url_to_bad_test_results[url] = sorted_test_to_build_ids

    if len(sorted_test_to_build_ids) > 0:
        print("URL: {}".format(url))
        print("{:>60} {:10} {:25} {}".format(
            "Test Name", "Total Runs", "Bad Runs(failed/timeout/hanging)", "Flakyness"))
        for bad_test in sorted_test_to_build_ids:
            test_status = sorted_test_to_build_ids[bad_test]
            print("{:>60} {:10} {:7} ( {:4} / {:5} / {:5} ) {:2.0f}%".format(
                bad_test, len(test_status['all']), test_status['bad_count'],
                len(test_status['failed']), len(test_status['timeout']),
                len(test_status['hanging']), test_status['flakyness']))
    else:
        print("No flaky tests found.")
        if len(url_to_build_ids[url]) == len(build_ids_without_tests_run):
            print("None of the analyzed builds have test results.")

    print("Builds analyzed: {}".format(url_to_build_ids[url]))
    print("Builds without any test runs: {}".format(build_ids_without_tests_run))
    print("")

all_bad_tests = all_hanging_tests.union(all_failed_tests)
if args.mvn:
    includes = ",".join(all_bad_tests)
    with open(output_dir + "/includes", "w") as inc_file:
        inc_file.write(includes)

    excludes = ["**/{0}.java".format(bad_test) for bad_test in all_bad_tests]
    with open(output_dir + "/excludes", "w") as exc_file:
        exc_file.write(",".join(excludes))

    with open(output_dir + "/timeout", "w") as timeout_file:
        timeout_file.write(",".join(all_timeout_tests))

    with open(output_dir + "/failed", "w") as failed_file:
        failed_file.write(",".join(all_failed_tests))

dev_support_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f:
    template = Template(f.read())

with open(output_dir + "/dashboard.html", "w") as f:
    datetime = time.strftime("%m/%d/%Y %H:%M:%S")
    f.write(template.render(datetime=datetime, bad_tests_count=len(all_bad_tests),
                            results=url_to_bad_test_results, build_ids=url_to_build_ids))