HBASE-16384 Update report-flakies.py script to allow specifying a list of build ids to be excluded.
Also fixes some pylint errors. Change-Id: I4620756c277c36a1ddb6d6cbd4d3e380da8442d7
commit 7999bb9bd3
parent e51fcdd778
dev-support/findHangingTests.py

@@ -31,7 +31,10 @@ BAD_RUN_STRINGS = [
     "The forked VM terminated without properly saying goodbye", # JVM crashed.
 ]

-# Returns [[all tests], [failed tests], [timeout tests], [hanging tests]]
+# Returns [[all tests], [failed tests], [timeout tests], [hanging tests]] if successfully gets
+# the build information.
+# If there is error getting console text or if there are blacklisted strings in console text,
+# then returns None.
 # Definitions:
 # All tests: All testcases which were run.
 # Hanging test: A testcase which started but never finished.
@@ -44,7 +47,7 @@ def get_bad_tests(console_url):
     if response.status_code != 200:
         print "Error getting consoleText. Response = {} {}".format(
             response.status_code, response.reason)
-        return {}
+        return

    all_tests = set()
    hanging_tests = set()
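Taken together, the two hunks above change get_bad_tests's error value from {} to an implicit None and document that contract in the comment block. A minimal sketch of a caller honoring the contract (the analyze wrapper and console_url value are hypothetical, not part of this diff):

    def analyze(console_url):
        result = findHangingTests.get_bad_tests(console_url)
        if not result:
            # None: console text was unavailable or a blacklisted string matched.
            return None
        [all_tests, failed_tests, timeout_tests, hanging_tests] = result
        return failed_tests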
dev-support/report-flakies.py

@@ -16,27 +16,38 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-# This script uses Jenkins REST api to collect test result(s) of given build/builds and generates
-# flakyness data about unittests.
-# Print help: report-flakies.py -h
+"""
+This script uses Jenkins REST api to collect test result(s) of given build/builds and generates
+flakyness data about unittests.
+Print help: report-flakies.py -h
+"""
 import argparse
-import findHangingTests
-from jinja2 import Template
-import os
+import logging
+import os
 import requests
+
+import findHangingTests

 parser = argparse.ArgumentParser()
-parser.add_argument("--urls", metavar="url[ max-builds]", action="append", required=True,
-    help="Urls to analyze, which can refer to simple projects, multi-configuration projects or "
-         "individual build run. Optionally, specify maximum builds to analyze for this url "
-         "(if available on jenkins) using space as separator. By default, all available "
-         "builds are analyzed.")
-parser.add_argument("--mvn", action="store_true",
+parser.add_argument(
+    '--urls', metavar='URL', action='append', required=True,
+    help='Urls to analyze, which can refer to simple projects, multi-configuration projects or '
+         'individual build run.')
+parser.add_argument('--excluded-builds', metavar='n1,n2', action='append',
+                    help='List of build numbers to exclude (or "None"). Not required, '
+                         'but if specified, number of uses should be same as that of --urls '
+                         'since the values are matched.')
+parser.add_argument('--max-builds', metavar='n', action='append', type=int,
+                    help='The maximum number of builds to use (if available on jenkins). Specify '
+                         '0 to analyze all builds. Not required, but if specified, number of uses '
+                         'should be same as that of --urls since the values are matched.')
+parser.add_argument(
+    "--mvn", action="store_true",
     help="Writes two strings for including/excluding these flaky tests using maven flags. These "
         "strings are written to files so they can be saved as artifacts and easily imported in "
         "other projects. Also writes timeout and failing tests in separate files for "
-        "reference.")
+         "reference.")
 parser.add_argument("-v", "--verbose", help="Prints more logs.", action="store_true")
 args = parser.parse_args()

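Since all three options use action='append', argparse builds parallel lists and the i-th values are matched across flags. A hedged sketch of an invocation (job URLs and build numbers are illustrative, not from this change):

    args = parser.parse_args([
        '--urls', 'https://jenkins.example.org/job/HBase-Trunk_matrix',
        '--urls', 'https://jenkins.example.org/job/HBase-1.3-IT',
        '--excluded-builds', '1021,1023',  # skip two builds of the first job
        '--excluded-builds', 'None',       # exclude nothing for the second job
        '--max-builds', '0',               # 0 = analyze all available builds
        '--max-builds', '30'])
    # args.urls[i], args.excluded_builds[i] and args.max_builds[i] describe job i.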
@@ -46,39 +57,56 @@ if args.verbose:
     logger.setLevel(logging.INFO)


-# Given url of an executed build, analyzes its console text, and returns
-# [list of all tests, list of timeout tests, list of failed tests].
 def get_bad_tests(build_url):
+    """
+    Given url of an executed build, analyzes its console text, and returns
+    [list of all tests, list of timeout tests, list of failed tests].
+    Returns None if can't get console text or if there is any other error.
+    """
     logger.info("Analyzing %s", build_url)
-    json_response = requests.get(build_url + "/api/json").json()
-    if json_response["building"]:
+    response = requests.get(build_url + "/api/json").json()
+    if response["building"]:
         logger.info("Skipping this build since it is in progress.")
         return {}
     console_url = build_url + "/consoleText"
-    result = findHangingTests.get_bad_tests(console_url)
-    if not result:
-        logger.info("Ignoring build {}".format(build_url))
-        return {}
-    return result
+    build_result = findHangingTests.get_bad_tests(console_url)
+    if not build_result:
+        logger.info("Ignoring build %s", build_url)
+        return
+    return build_result


-# If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
-# get urls for individual jobs.
-def expand_multi_configuration_projects(urls_list):
-    expanded_urls = []
-    for url_max_build in urls_list:
-        splits = url_max_build.split()
-        url = splits[0]
-        max_builds = 10000 # Some high value
-        if len(splits) == 2:
-            max_builds = int(splits[1])
-        json_response = requests.get(url + "/api/json").json()
-        if json_response.has_key("activeConfigurations"):
-            for config in json_response["activeConfigurations"]:
-                expanded_urls.append({'url':config["url"], 'max_builds': max_builds})
+def expand_multi_config_projects(cli_args):
+    """
+    If any url is of type multi-configuration project (i.e. has key 'activeConfigurations'),
+    get urls for individual jobs.
+    """
+    job_urls = cli_args.urls
+    excluded_builds_arg = cli_args.excluded_builds
+    max_builds_arg = cli_args.max_builds
+    if excluded_builds_arg is not None and len(excluded_builds_arg) != len(job_urls):
+        raise Exception("Number of --excluded-builds arguments should be same as that of --urls "
+                        "since values are matched.")
+    if max_builds_arg is not None and len(max_builds_arg) != len(job_urls):
+        raise Exception("Number of --max-builds arguments should be same as that of --urls "
+                        "since values are matched.")
+    final_expanded_urls = []
+    for (i, job_url) in enumerate(job_urls):
+        max_builds = 10000 # Some high number
+        if max_builds_arg is not None and max_builds_arg[i] != 0:
+            max_builds = int(max_builds_arg[i])
+        excluded_builds = []
+        if excluded_builds_arg is not None and excluded_builds_arg[i] != "None":
+            excluded_builds = [int(x) for x in excluded_builds_arg[i].split(",")]
+        response = requests.get(job_url + "/api/json").json()
+        if response.has_key("activeConfigurations"):
+            for config in response["activeConfigurations"]:
+                final_expanded_urls.append({'url':config["url"], 'max_builds': max_builds,
+                                            'excludes': excluded_builds})
         else:
-            expanded_urls.append({'url':url, 'max_builds': max_builds})
-    return expanded_urls
+            final_expanded_urls.append({'url':job_url, 'max_builds': max_builds,
+                                        'excludes': excluded_builds})
+    return final_expanded_urls


 # Set of timeout/failed tests across all given urls.
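For reference, expand_multi_config_projects now returns a flat list with one dict per job (or per active configuration of a matrix job); the values below are illustrative, not from a real Jenkins response:

    # [{'url': 'https://jenkins.example.org/job/HBase-Trunk_matrix/jdk=jdk8',
    #   'max_builds': 10000, 'excludes': [1021, 1023]},
    #  {'url': 'https://jenkins.example.org/job/HBase-1.3-IT',
    #   'max_builds': 30, 'excludes': []}]

Note that dict.has_key() exists only in Python 2; the script is Python 2 throughout (it also uses print statements), so the call is valid here, though `"activeConfigurations" in response` would be the forward-compatible spelling.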
@@ -90,9 +118,10 @@ all_hanging_tests = set()
 url_to_bad_test_results = {}

 # Iterates over each url, gets test results and prints flaky tests.
-expanded_urls = expand_multi_configuration_projects(args.urls)
+expanded_urls = expand_multi_config_projects(args)
 for url_max_build in expanded_urls:
     url = url_max_build["url"]
+    excludes = url_max_build["excludes"]
     json_response = requests.get(url + "/api/json").json()
     if json_response.has_key("builds"):
         builds = json_response["builds"]
@@ -106,15 +135,17 @@ for url_max_build in expanded_urls:
         build_ids_without_tests_run = []
         for build in builds:
             build_id = build["number"]
-            build_ids.append(build_id)
+            if build_id in excludes:
+                continue
             result = get_bad_tests(build["url"])
-            if result == {}:
+            if not result:
                 continue
             if len(result[0]) > 0:
                 build_id_to_results[build_id] = result
             else:
                 build_ids_without_tests_run.append(build_id)
             num_builds += 1
+            build_ids.append(build_id)
             if num_builds == url_max_build["max_builds"]:
                 break

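The reordering matters for the bookkeeping: excluded and unusable builds now hit continue before num_builds += 1 and build_ids.append(build_id), so they consume neither the max-builds budget nor a slot in build_ids. A standalone sketch of just the selection logic (build numbers hypothetical; the skipping of in-progress/bad builds is omitted):

    def select_builds(build_numbers, excludes, max_builds):
        selected = []
        for build_id in build_numbers:
            if build_id in excludes:
                continue  # exclusions do not count toward max_builds
            selected.append(build_id)
            if len(selected) == max_builds:
                break
        return selected

    # Newest-first builds 12..1, excluding 11 and 9, with a budget of 3:
    print select_builds(range(12, 0, -1), [11, 9], 3)  # [12, 10, 8]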
@@ -130,7 +161,7 @@ for url_max_build in expanded_urls:

 # For each bad test, get build ids where it ran, timed out, failed or hanged.
 test_to_build_ids = {key : {'all' : set(), 'timeout': set(), 'failed': set(), 'hanging' : set()}
-    for key in bad_tests}
+                     for key in bad_tests}
 for build in build_id_to_results:
     [all_tests, failed_tests, timeout_tests, hanging_tests] = build_id_to_results[build]
     for bad_test in test_to_build_ids:
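Only the continuation indent changes in this hunk (a pylint bad-continuation fix). For readability, the mapping being populated looks like this (test name and build ids invented):

    # {'TestExportSnapshot': {'all': set([1021, 1022]), 'timeout': set([1022]),
    #                         'failed': set(), 'hanging': set([1021])}}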
@@ -176,11 +207,11 @@ if args.mvn:
     with open("./excludes", "w") as exc_file:
         exc_file.write(",".join(excludes))

-    with open("./timeout", "w") as file:
-        file.write(",".join(all_timeout_tests))
+    with open("./timeout", "w") as timeout_file:
+        timeout_file.write(",".join(all_timeout_tests))

-    with open("./failed", "w") as file:
-        file.write(",".join(all_failed_tests))
+    with open("./failed", "w") as failed_file:
+        failed_file.write(",".join(all_failed_tests))

 dev_support_dir = os.path.dirname(os.path.abspath(__file__))
 with open(os.path.join(dev_support_dir, "flaky-dashboard-template.html"), "r") as f:
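Renaming the handles from file to timeout_file/failed_file fixes pylint's redefined-builtin warning (file is a builtin in Python 2); behavior is unchanged. How the comma-joined artifacts feed back into maven is outside this diff; a hypothetical consumer of one of them:

    # Hypothetical reader (not part of this change): load the comma-joined test
    # names back into a list, e.g. to assemble maven include/exclude flags.
    with open("./failed") as failed_file:
        contents = failed_file.read()
    failed_tests = contents.split(",") if contents else []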