4d88b4bc26
Currently we run all benchmark perf tests on CircleCI. Since we do not collect any results, this needlessly wastes CI/RBE resources. Instead, we should skip the benchmark perf tests in CI but keep running the functional e2e tests that ensure the benchmarks are not broken. We achieve this by splitting the perf and e2e tests into separate files/targets. PR Close #34753
26 lines
848 B
Python
load("//tools:defaults.bzl", "protractor_web_test_suite")

"""
  Macro that can be used to define a benchmark test. This differs from
  a normal Protractor test suite because we specify a custom "perf" configuration
  that sets up "@angular/benchpress". Benchmark test targets will not run on CI
  unless explicitly requested.
"""

def benchmark_test(name, server, deps, tags = []):
    protractor_web_test_suite(
        name = name,
        # Custom Protractor configuration that sets up "@angular/benchpress".
        configuration = "//:protractor-perf.conf.js",
        data = [
            "//packages/benchpress",
        ],
        on_prepare = "//modules/benchmarks:start-server.js",
        server = server,
        # Benchmark targets should not run on CI by default.
        tags = tags + ["manual"],
        test_suite_tags = ["manual"],
        deps = [
            "@npm//yargs",
        ] + deps,
    )
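
For reference, here is a minimal sketch of how a benchmark package might consume this macro alongside its CI-run e2e suite, under the split described in the commit message. The load path of the macro file, the target names, and the labels (:devserver, :e2e_lib, :perf_lib) are hypothetical placeholders, not targets taken from the actual PR.

# BUILD.bazel (hypothetical example package)
load("//tools:defaults.bzl", "protractor_web_test_suite")
# Assumed location of the macro file defined above.
load("//modules/benchmarks:benchmark_test.bzl", "benchmark_test")

# Functional e2e suite: carries no "manual" tag, so it still runs on CI
# and guards against broken benchmarks.
protractor_web_test_suite(
    name = "e2e_tests",
    server = ":devserver",
    deps = [":e2e_lib"],
)

# Perf suite: the macro tags it "manual", so wildcard invocations such as
# `bazel test //...` skip it on CI.
benchmark_test(
    name = "perf_tests",
    server = ":devserver",
    deps = [":perf_lib"],
)

Because Bazel excludes "manual"-tagged targets from wildcard target patterns, the perf suite only runs when its label is requested explicitly, e.g. `bazel test //some/pkg:perf_tests`.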