# angular-cn/.circleci/config.yml
# Configuration file for https://circleci.com/gh/angular/angular
# Note: YAML anchors allow an object to be re-used, reducing duplication.
# The ampersand declares an alias for an object, then later the `<<: *name`
# syntax dereferences it.
# See http://blog.daemonl.com/2016/02/yaml.html
# To validate changes, use an online parser, e.g.
# http://yaml-online-parser.appspot.com/
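# A minimal sketch of the pattern (illustrative only, not part of this config):
#
#   defaults: &defaults
#     working_directory: ~/ng
#   some_job:
#     <<: *defaults
#
# Here `&defaults` declares the anchor and `<<: *defaults` merges it into `some_job`.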
# CircleCI configuration version
# Version 2.1 allows for extra config reuse features
# https://circleci.com/docs/2.0/reusing-config/#getting-started-with-config-reuse
version: 2.1
# We don't want to include the current branch name in the cache key because that would prevent
# PRs from being able to restore the cache since the branch names are always different for PRs.
# The cache key should only consist of dynamic values that change whenever something in the
# cache changes. For example:
# 1) yarn lock file changes --> cached "node_modules" are different.
# 2) bazel repository definitions change --> cached bazel repositories are different.
# Windows needs its own cache key because binaries in node_modules are different.
# **NOTE 1**: If you change the cache key prefix, also sync the cache_key_fallback to match.
# **NOTE 2**: Keep the static part of the cache key as prefix to enable correct fallbacks.
# See https://circleci.com/docs/2.0/caching/#restoring-cache for how prefixes work in CircleCI.
var_3: &cache_key v7-angular-node-12-{{ checksum ".bazelversion" }}-{{ checksum "yarn.lock" }}-{{ checksum "WORKSPACE" }}-{{ checksum "packages/bazel/package.bzl" }}-{{ checksum "aio/yarn.lock" }}
# We invalidate the cache if the Bazel version changes because otherwise the `bazelisk` cache
# folder will contain all previously used versions and ultimately cause the cache restoring to
# be slower due to its growing size.
var_4: &cache_key_fallback v7-angular-node-12-{{ checksum ".bazelversion" }}
var_3_win: &cache_key_win v7-angular-win-node-12-{{ checksum ".bazelversion" }}-{{ checksum "yarn.lock" }}-{{ checksum "WORKSPACE" }}-{{ checksum "packages/bazel/package.bzl" }}-{{ checksum "aio/yarn.lock" }}
var_4_win: &cache_key_win_fallback v7-angular-win-node-12-{{ checksum ".bazelversion" }}
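# As a sketch of how these keys are consumed (the `setup` job below uses exactly this
# pattern), a `restore_cache` step lists the exact key first and the prefix-only
# fallback second:
#
#   - restore_cache:
#       keys:
#         - *cache_key
#         - *cache_key_fallback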
# Cache key for the `components-repo-unit-tests` job. **Note** when updating the SHA in the
# cache keys also update the SHA for the "COMPONENTS_REPO_COMMIT" environment variable.
var_5: &components_repo_unit_tests_cache_key v9-angular-components-09e68db8ed5b1253f2fe38ff954ef0df019fc25a
var_6: &components_repo_unit_tests_cache_key_fallback v9-angular-components-
# Workspace initially persisted by the `setup` job, and then enhanced by `build-npm-packages` and
# `build-ivy-npm-packages`.
# https://circleci.com/docs/2.0/workflows/#using-workspaces-to-share-data-among-jobs
# https://circleci.com/blog/deep-diving-into-circleci-workspaces/
var_7: &workspace_location ~/
# Filter to run a job on builds for pull requests only.
var_8: &only_on_pull_requests
  filters:
    branches:
      only:
        - /pull\/\d+/
# Filter to skip a job on builds for pull requests.
var_9: &skip_on_pull_requests
  filters:
    branches:
      ignore:
        - /pull\/\d+/
# Filter to run a job on builds for the master branch only.
var_10: &only_on_master
  filters:
    branches:
      only:
        - master
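# These filter aliases are merged into workflow job entries with `<<:`. For example,
# the `default_workflow` below applies the PR-only filter to `aio_preview` like this:
#
#   - aio_preview:
#       <<: *only_on_pull_requests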
# Executor Definitions
# https://circleci.com/docs/2.0/reusing-config/#authoring-reusable-executors
# **NOTE 1**: Pin to exact images using an ID (SHA). See https://circleci.com/docs/2.0/circleci-images/#using-a-docker-image-id-to-pin-an-image-to-a-fixed-version.
# (Using the tag is not necessary when pinning by ID, but include it anyway for documentation purposes.)
# **NOTE 2**: If you change the version of the docker images, also change the `cache_key` suffix.
executors:
  default-executor:
    parameters:
      resource_class:
        type: string
        default: medium
    docker:
      - image: circleci/node:12.14.1@sha256:f9de24fc0017059cc42ef7d07db060008af65a98b1f0cdd1ef3339213226bf6d
    resource_class: << parameters.resource_class >>
    working_directory: ~/ng
  windows-executor:
    working_directory: ~/ng
    resource_class: windows.2xlarge
    # CircleCI windows VMs do have the GitBash shell available:
    # https://github.com/CircleCI-Public/windows-preview-docs#shells
    # But in this specific case we really should not use it because Bazel must not be run from
    # GitBash. These issues discuss why:
    # https://github.com/bazelbuild/bazel/issues/5751
    # https://github.com/bazelbuild/bazel/issues/5724#issuecomment-410194038
    # https://github.com/bazelbuild/bazel/issues/6339#issuecomment-441600879
    shell: powershell.exe -ExecutionPolicy Bypass
    machine:
      # Windows preview image that includes the following:
      # - Visual Studio 2019 build tools
      # - Node 12
      # - yarn 1.17
      # - Python 3.7.4
      image: windows-server-2019-vs2019:201908-02
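# Jobs select an executor by name and may override its parameters. For example, the
# `test_ivy_aot` job below requests a bigger machine like this:
#
#   executor:
#     name: default-executor
#     resource_class: xlarge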
# Command Definitions
# https://circleci.com/docs/2.0/reusing-config/#authoring-reusable-commands
commands:
  custom_attach_workspace:
    description: Attach workspace at a predefined location
    steps:
      - attach_workspace:
          at: *workspace_location
  # Install shared libs used by Chrome, which is provisioned either by
  # rules_webtesting or by puppeteer.
  install_chrome_libs:
    description: Install shared Chrome libs
    steps:
      - run:
          name: Install shared Chrome libs
          command: |
            sudo apt-get update
            # Install GTK+ graphical user interface (libgtk-3-0), advanced linux sound architecture (libasound2),
            # network security service libraries (libnss3) & X11 Screen Saver extension library (libxss1),
            # which are dependencies of chrome & needed for karma & protractor headless chrome tests.
            # This is a very small install which takes around 7s compared to using the full
            # circleci/node:x.x.x-browsers image.
            sudo apt-get -y install libgtk-3-0 libasound2 libnss3 libxss1
  # Install java runtime which is required by some integration tests such as
  # //integration:hello_world__closure_test, //integration:i18n_test and
  # //integration:ng_elements_test to run the closure compiler
  install_java:
    description: Install java
    steps:
      - run:
          name: Install java
          command: |
            sudo apt-get update
            # Install java runtime
            sudo apt-get install default-jre
  # Initializes the CI environment by setting up common environment variables.
  init_environment:
    description: Initializing environment (setting up variables)
    steps:
      - run:
          name: Set up environment
          environment:
            CIRCLE_GIT_BASE_REVISION: << pipeline.git.base_revision >>
            CIRCLE_GIT_REVISION: << pipeline.git.revision >>
          command: ./.circleci/env.sh
      - run:
          # Configure git as the CircleCI `checkout` command does.
          # This is needed because we only checkout on the setup job.
          # Add GitHub to known hosts.
          name: Configure git
          command: |
            mkdir -p ~/.ssh
            echo 'github.com ssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAq2A7hRGmdnm9tUDbO9IDSwBK6TbQa+PXYPCPy6rbTrTtw7PHkccKrpp0yVhp5HdEIcKr6pLlVDBfOLX9QUsyCOV0wzfjIJNlGEYsdlLJizHhbn2mUjvSAHQqZETYP81eFzLQNnPHt4EVVUh7VfDESU84KezmD5QlWpXLmvU31/yMf+Se8xhHTvKSCZIFImWwoG6mbUoWf9nzpIoaSjB+weqqUUmpaaasXVal72J+UX2B+2RPW3RcT0eOzQgqlJL3RKrTJvdsjE3JEAvGq3lGHSZXy28G3skua2SmVi/w4yCE6gbODqnTWlg7+wC604ydGXA8VJiS5ap43JXiUFFAaQ==' >> ~/.ssh/known_hosts
            git config --global url."ssh://git@github.com".insteadOf "https://github.com" || true
            git config --global gc.auto 0 || true
  init_saucelabs_environment:
    description: Sets up a domain that resolves to the local host.
    steps:
      - run:
          name: Preparing environment for running tests on Sauce Labs.
          command: |
            # For SauceLabs jobs, we set up a domain which resolves to the machine which launched
            # the tunnel. We do this because devices are sometimes not able to properly resolve
            # `localhost` or `127.0.0.1` through the SauceLabs tunnel. Using a domain that does not
            # resolve to anything on SauceLabs VMs ensures that such requests are always resolved
            # through the tunnel, and resolve to the actual tunnel host machine (i.e. the CircleCI VM).
            # More context can be found in: https://github.com/angular/angular/pull/35171.
            setPublicVar SAUCE_LOCALHOST_ALIAS_DOMAIN "angular-ci.local"
            setSecretVar SAUCE_ACCESS_KEY $(echo $SAUCE_ACCESS_KEY | rev)
      - run:
          # Sets up a local domain in the machine's host file that resolves to the local
          # host. This domain is helpful in Sauce Labs tests where devices are not able to
          # properly resolve `localhost` or `127.0.0.1` through the sauce-connect tunnel.
          name: Setting up alias domain for local host.
          command: echo "127.0.0.1 $SAUCE_LOCALHOST_ALIAS_DOMAIN" | sudo tee -a /etc/hosts
  # Normally this would be an individual job instead of a command.
  # But startup and setup time for each individual windows job are high enough to discourage
  # many small jobs, so instead we use a command for setup unless the gain becomes significant.
  setup_win:
    description: Setup windows node environment
    steps:
      # Use the Linux workspace directly, as it already has the checkout, rebase and node modules.
      - custom_attach_workspace
      # Install Bazel pre-requisites that aren't in the preconfigured CircleCI Windows VM.
      - run: ./.circleci/windows-env.ps1
      - run: node --version
      - run: yarn --version
      - restore_cache:
          keys:
            - *cache_key_win
            - *cache_key_win_fallback
      # Reinstall to get windows binaries.
      - run: yarn install --frozen-lockfile --non-interactive
      # Install @bazel/bazelisk globally and use that for the first run.
      # Workaround for https://github.com/bazelbuild/rules_nodejs/issues/894
      # NB: the issue was for @bazel/bazel but the same problem applies to @bazel/bazelisk
      - run: yarn global add @bazel/bazelisk@$env:BAZELISK_VERSION
      - run: bazelisk info
  notify_webhook_on_fail:
    description: Notify a webhook about failure
    parameters:
      # `webhook_url_env_var` are secret env vars defined in CircleCI project settings.
      # The URLs come from https://angular-team.slack.com/apps/A0F7VRE7N-circleci.
      webhook_url_env_var:
        type: env_var_name
    steps:
      - run:
          when: on_fail
          command: |
            notificationJson="{\"text\":\":x: \`$CIRCLE_JOB\` job for $CIRCLE_BRANCH branch failed on build $CIRCLE_BUILD_NUM: $CIRCLE_BUILD_URL :scream:\"}"
            curl --request POST --header "Content-Type: application/json" --data "$notificationJson" ${<< parameters.webhook_url_env_var >>}
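  # Example invocation (as used by the monitoring jobs below):
  #
  #   - notify_webhook_on_fail:
  #       webhook_url_env_var: SLACK_CARETAKER_WEBHOOK_URL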
# Job definitions
# Jobs can include parameters that are passed in the workflow job invocation.
# https://circleci.com/docs/2.0/reusing-config/#authoring-parameterized-jobs
jobs:
  setup:
    executor: default-executor
    steps:
      - checkout
      - init_environment
      - run:
          name: Rebase PR on target branch
          # After checkout, rebase on top of target branch.
          command: >
            if [[ -n "${CIRCLE_PR_NUMBER}" ]]; then
              # User is required for rebase.
              git config user.name "angular-ci"
              git config user.email "angular-ci"
              # Rebase PR on top of target branch.
              node tools/rebase-pr.js
            else
              echo "This build is not over a PR, nothing to do."
            fi
      # This cache is saved in the build-npm-packages job so that the Bazel cache is also included.
      - restore_cache:
          keys:
            - *cache_key
            - *cache_key_fallback
      - run:
          name: Running Yarn install
          command: yarn install --frozen-lockfile --non-interactive
          # Yarn's requests sometimes take more than 10mins to complete.
          no_output_timeout: 45m
      - run: yarn --cwd aio install --frozen-lockfile --non-interactive
      # Make the bazel directories and add a file to them if they don't exist already so that
      # persist_to_workspace does not fail.
      - run: |
          if [ ! -d ~/bazel_repository_cache ]; then
            mkdir ~/bazel_repository_cache
            touch ~/bazel_repository_cache/MARKER
          fi
      # Persist any changes at this point to be reused by further jobs.
      # **NOTE**: To add new content to the workspace, always persist on the same root.
      - persist_to_workspace:
          root: *workspace_location
          paths:
            - ./ng
            - ./bazel_repository_cache
  lint:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - run: yarn -s tslint
      - run: yarn -s ng-dev format changed $CI_GIT_BASE_REVISION --check
      - run: yarn -s ts-circular-deps:check
      - run: yarn -s ng-dev pullapprove verify
      - run: yarn -s ng-dev ngbot verify
      - run: yarn -s ng-dev commit-message validate-range --range $CI_COMMIT_RANGE
  test:
    executor:
      name: default-executor
      # Now that large integration tests are running locally in parallel (they can't run on RBE yet
      # as they require network access for yarn install), this test is running out of memory
      # consistently with the xlarge machine.
      # TODO: switch back to xlarge once integration tests are running on remote-exec
      resource_class: 2xlarge+
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      - install_java
      - run:
          command: yarn bazel test //... --build_tag_filters=-ivy-only --test_tag_filters=-ivy-only
          no_output_timeout: 20m
  # Temporary job to test what will happen when we flip the Ivy flag to true
  test_ivy_aot:
    executor:
      name: default-executor
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      # We need to explicitly specify the --symlink_prefix option because otherwise we would
      # not be able to easily find the output bin directory when uploading artifacts for size
      # measurements.
      - run:
          command: yarn test-ivy-aot //... --symlink_prefix=dist/
          no_output_timeout: 20m
      # Publish bundle artifacts which will be used to calculate the size change. **Note**: Make
      # sure that the size plugin from the Angular robot fetches the artifacts from this CircleCI
      # job (see .github/angular-robot.yml). Additionally any artifacts need to be stored with the
      # following path format: "{projectName}/{context}/{fileName}". This format is necessary
      # because otherwise the bot is not able to pick up the artifacts from CircleCI. See:
      # https://github.com/angular/github-robot/blob/master/functions/src/plugins/size.ts#L392-L394
      - store_artifacts:
          path: dist/bin/packages/core/test/bundling/hello_world/bundle.min.js
          destination: core/hello_world/bundle
      - store_artifacts:
          path: dist/bin/packages/core/test/bundling/todo/bundle.min.js
          destination: core/todo/bundle
      - store_artifacts:
          path: dist/bin/packages/core/test/bundling/hello_world/bundle.min.js.br
          destination: core/hello_world/bundle.br
      - store_artifacts:
          path: dist/bin/packages/core/test/bundling/todo/bundle.min.js.br
          destination: core/todo/bundle.br
  # NOTE: This is currently limited to master builds only. See the `monitoring` configuration.
  saucelabs_view_engine:
    executor:
      name: default-executor
      # In order to avoid the bottleneck of having a slow host machine, we acquire a better
      # container for this job. This is necessary because we launch a lot of browsers concurrently
      # and therefore the tunnel and Karma need to process a lot of file requests and tests.
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      - init_saucelabs_environment
      - run:
          name: Run Bazel tests on Saucelabs with ViewEngine
          # See /tools/saucelabs/README.md for more info
          command: |
            yarn bazel run //tools/saucelabs:sauce_service_setup
            TESTS=$(./node_modules/.bin/bazelisk query --output label '(kind(karma_web_test, ...) intersect attr("tags", "saucelabs", ...)) except attr("tags", "ivy-only", ...) except attr("tags", "fixme-saucelabs-ve", ...)')
            yarn bazel test --config=saucelabs ${TESTS}
            yarn bazel run //tools/saucelabs:sauce_service_stop
          no_output_timeout: 40m
      - notify_webhook_on_fail:
          webhook_url_env_var: SLACK_DEV_INFRA_CI_FAILURES_WEBHOOK_URL
  # NOTE: This is currently limited to master builds only. See the `monitoring` configuration.
  saucelabs_ivy:
    executor:
      name: default-executor
      # In order to avoid the bottleneck of having a slow host machine, we acquire a better
      # container for this job. This is necessary because we launch a lot of browsers concurrently
      # and therefore the tunnel and Karma need to process a lot of file requests and tests.
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      - init_saucelabs_environment
      - run:
          name: Run Bazel tests on Saucelabs with Ivy
          # See /tools/saucelabs/README.md for more info
          command: |
            yarn bazel run //tools/saucelabs:sauce_service_setup
            TESTS=$(./node_modules/.bin/bazelisk query --output label '(kind(karma_web_test, ...) intersect attr("tags", "saucelabs", ...)) except attr("tags", "no-ivy-aot", ...) except attr("tags", "fixme-saucelabs-ivy", ...)')
            yarn bazel test --config=saucelabs --config=ivy ${TESTS}
            yarn bazel run //tools/saucelabs:sauce_service_stop
          no_output_timeout: 40m
      - notify_webhook_on_fail:
          webhook_url_env_var: SLACK_DEV_INFRA_CI_FAILURES_WEBHOOK_URL
  test_aio:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      # Build aio
      - run: yarn --cwd aio build --progress=false
      # Lint the code
      - run: yarn --cwd aio lint
      # Run unit tests
      - run: yarn --cwd aio test --progress=false --watch=false
      # Run e2e tests
      - run: yarn --cwd aio e2e --configuration=ci
      # Run PWA-score tests
      - run: yarn --cwd aio test-pwa-score-localhost $CI_AIO_MIN_PWA_SCORE
      # Run accessibility tests
      - run: yarn --cwd aio test-a11y-score-localhost
      # Check the bundle sizes.
      - run: yarn --cwd aio payload-size
      # Run unit tests for Firebase redirects
      - run: yarn --cwd aio redirects-test
  deploy_aio:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      # Deploy angular.io to production (if necessary)
      - run: setPublicVar_CI_STABLE_BRANCH
      - run: yarn --cwd aio deploy-production
  test_aio_local:
    parameters:
      viewengine:
        type: boolean
        default: false
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      # Build aio (with local Angular packages)
      - run: yarn --cwd aio build-local<<# parameters.viewengine >>-with-viewengine<</ parameters.viewengine >>-ci
      # Run unit tests
      - run: yarn --cwd aio test --progress=false --watch=false
      # Run e2e tests
      - run: yarn --cwd aio e2e --configuration=ci
      # Run PWA-score tests
      - run: yarn --cwd aio test-pwa-score-localhost $CI_AIO_MIN_PWA_SCORE
      # Check the bundle sizes.
      - run: yarn --cwd aio payload-size aio-local<<# parameters.viewengine >>-viewengine<</ parameters.viewengine >>
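  # With `viewengine: true` the build step above expands to
  # `yarn --cwd aio build-local-with-viewengine-ci`; with the default (`false`) the
  # conditional `<<# parameters.viewengine >>` sections drop out, leaving
  # `yarn --cwd aio build-local-ci`.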
  test_aio_tools:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      # Install
      - run: yarn --cwd aio install --frozen-lockfile --non-interactive
      - run: yarn --cwd aio extract-cli-command-docs
      # Run tools tests
      - run: yarn --cwd aio tools-test
      - run: ./aio/aio-builds-setup/scripts/test.sh
  test_docs_examples:
    parameters:
      viewengine:
        type: boolean
        default: false
    executor:
      name: default-executor
      resource_class: xlarge
    parallelism: 5
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      # Install aio
      - run: yarn --cwd aio install --frozen-lockfile --non-interactive
      # Run examples tests. The "CIRCLE_NODE_INDEX" will be set if "parallelism" is enabled.
      # Since the parallelism is set to "5", there will be five parallel CircleCI containers,
      # with node index "0", "1", etc., which is passed to the "--shard" argument
      # (e.g. the first container runs with "--shard=0/5").
      - run: yarn --cwd aio example-e2e --setup --local <<# parameters.viewengine >>--viewengine<</ parameters.viewengine >> --cliSpecsConcurrency=5 --shard=${CIRCLE_NODE_INDEX}/${CIRCLE_NODE_TOTAL} --retry 2
  # This job should only be run on PR builds, where `CI_PULL_REQUEST` is not `false`.
  aio_preview:
    executor: default-executor
    environment:
      AIO_SNAPSHOT_ARTIFACT_PATH: &aio_preview_artifact_path 'aio/tmp/snapshot.tgz'
    steps:
      - custom_attach_workspace
      - init_environment
      - run: ./aio/scripts/build-artifacts.sh $AIO_SNAPSHOT_ARTIFACT_PATH $CI_PULL_REQUEST $CI_COMMIT
      - store_artifacts:
          path: *aio_preview_artifact_path
          # The `destination` needs to be kept in sync with the value of
          # `AIO_ARTIFACT_PATH` in `aio/aio-builds-setup/Dockerfile`.
          destination: aio/dist/aio-snapshot.tgz
      - run: node ./aio/scripts/create-preview $CIRCLE_BUILD_NUM
  # This job should only be run on PR builds, where `CI_PULL_REQUEST` is not `false`.
  test_aio_preview:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      - run: yarn --cwd aio install --frozen-lockfile --non-interactive
      - run:
          name: Wait for preview and run tests
          command: node aio/scripts/test-preview.js $CI_PULL_REQUEST $CI_COMMIT $CI_AIO_MIN_PWA_SCORE
  # The `build-npm-packages` tasks exist for backwards-compatibility with old scripts and
  # tests that rely on the pre-Bazel `dist/packages-dist` output structure (build.sh).
  # Having multiple jobs that independently build in this manner duplicates some work; we build
  # the bazel packages more than once. Even though we have a remote cache, these jobs will
  # typically run in parallel so up-to-date outputs will not be available at the time the build
  # starts.
  # Build the view engine npm packages. No new jobs should depend on this.
  build-npm-packages:
    executor:
      name: default-executor
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      - run: node scripts/build/build-packages-dist.js
      # Save the npm packages from //packages/... for other workflow jobs to read
      - persist_to_workspace:
          root: *workspace_location
          paths:
            - ng/dist/packages-dist
            - ng/dist/zone.js-dist
      # Save dependencies and bazel repository cache to use on subsequent runs.
      - save_cache:
          key: *cache_key
          paths:
            - "node_modules"
            - "aio/node_modules"
            - "~/bazel_repository_cache"
            - "~/.cache/bazelisk"
  # Build the ivy npm packages.
  build-ivy-npm-packages:
    executor:
      name: default-executor
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      - run: node scripts/build/build-ivy-npm-packages.js
      # Save the npm packages from //packages/... for other workflow jobs to read
      - persist_to_workspace:
          root: *workspace_location
          paths:
            - ng/dist/packages-dist-ivy-aot
            - ng/dist/zone.js-dist-ivy-aot
  # This job creates compressed tarballs (`.tgz` files) for all Angular packages and stores them as
  # build artifacts. This makes it easy to try out changes from a PR build for testing purposes.
  # More info on CircleCI build artifacts: https://circleci.com/docs/2.0/artifacts
  #
  # NOTE: Currently, this job only runs for PR builds. See `publish_snapshot` for non-PR builds.
  publish_packages_as_artifacts:
    executor: default-executor
    environment:
      NG_PACKAGES_DIR: &ng_packages_dir 'dist/packages-dist'
      NG_PACKAGES_ARCHIVES_DIR: &ng_packages_archives_dir 'dist/packages-dist-archives'
      ZONEJS_PACKAGES_DIR: &zonejs_packages_dir 'dist/zone.js-dist'
      ZONEJS_PACKAGES_ARCHIVES_DIR: &zonejs_packages_archives_dir 'dist/zone.js-dist-archives'
    steps:
      - custom_attach_workspace
      - init_environment
      # Publish `@angular/*` packages.
      - run:
          name: Create artifacts for @angular/* packages
          command: ./scripts/ci/create-package-archives.sh $CI_BRANCH $CI_COMMIT $NG_PACKAGES_DIR $NG_PACKAGES_ARCHIVES_DIR
      - store_artifacts:
          path: *ng_packages_archives_dir
          destination: angular
      # Publish `zone.js` package.
      - run:
          name: Create artifacts for zone.js package
          command: ./scripts/ci/create-package-archives.sh $CI_BRANCH $CI_COMMIT $ZONEJS_PACKAGES_DIR $ZONEJS_PACKAGES_ARCHIVES_DIR
      - store_artifacts:
          path: *zonejs_packages_archives_dir
          destination: zone.js
  # This job updates the content of repos like github.com/angular/core-builds
  # for every green build on angular/angular.
  publish_snapshot:
    executor: default-executor
    steps:
      # See below - ideally this job should not trigger for non-upstream builds.
      # But since it does, we have to check this condition.
      - run:
          name: Skip this job for Pull Requests and Fork builds
          # Note: Using `CIRCLE_*` env variables (instead of those defined in `env.sh`) so that this
          # step can be run before `init_environment`.
          command: >
            if [[ -n "${CIRCLE_PR_NUMBER}" ]] ||
               [[ "$CIRCLE_PROJECT_USERNAME" != "angular" ]] ||
               [[ "$CIRCLE_PROJECT_REPONAME" != "angular" ]]; then
              circleci step halt
            fi
      - custom_attach_workspace
      - init_environment
      # CircleCI has a config setting to force SSH for all github connections.
      # This is not compatible with our mechanism of using a Personal Access Token,
      # so clear the global setting.
      - run: git config --global --unset "url.ssh://git@github.com.insteadof"
      - run:
          name: Decrypt github credentials
          # We need to ensure that the same default digest is used for encoding and decoding with
          # OpenSSL. OpenSSL versions might have different default digests which can cause
          # decryption failures based on the installed OpenSSL version. https://stackoverflow.com/a/39641378/4317734
          command: 'openssl aes-256-cbc -d -in .circleci/github_token -md md5 -k "${KEY}" -out ~/.git_credentials'
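          # For reference, a matching encryption command (a sketch inferred from the flags
          # above, not taken from this repo) would be:
          #   openssl aes-256-cbc -e -in <credentials-file> -md md5 -k "${KEY}" -out .circleci/github_token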
      - run: ./scripts/ci/publish-build-artifacts.sh
  aio_monitoring_stable:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      - run: setPublicVar_CI_STABLE_BRANCH
      - run:
          name: Check out `aio/` and yarn from the stable branch
          command: |
            git fetch origin $CI_STABLE_BRANCH
            git checkout --force origin/$CI_STABLE_BRANCH -- aio/ .yarn/ .yarnrc
      # Ignore yarn's engines check, because we checked out `aio/package.json` from the stable
      # branch and there could be a node version skew, which is acceptable in this monitoring job.
      - run: yarn config set ignore-engines true
      - run:
          name: Run tests against https://angular.io/
          command: ./aio/scripts/test-production.sh https://angular.io/ $CI_AIO_MIN_PWA_SCORE
      - notify_webhook_on_fail:
          webhook_url_env_var: SLACK_CARETAKER_WEBHOOK_URL
      - notify_webhook_on_fail:
          webhook_url_env_var: SLACK_DEV_INFRA_CI_FAILURES_WEBHOOK_URL
  aio_monitoring_next:
    executor: default-executor
    steps:
      - custom_attach_workspace
      - init_environment
      - install_chrome_libs
      - run:
          name: Run tests against https://next.angular.io/
          command: ./aio/scripts/test-production.sh https://next.angular.io/ $CI_AIO_MIN_PWA_SCORE
      - notify_webhook_on_fail:
          webhook_url_env_var: SLACK_CARETAKER_WEBHOOK_URL
      - notify_webhook_on_fail:
          webhook_url_env_var: SLACK_DEV_INFRA_CI_FAILURES_WEBHOOK_URL
  legacy-unit-tests-saucelabs:
    executor:
      name: default-executor
      # In order to avoid the bottleneck of having a slow host machine, we acquire a better
      # container for this job. This is necessary because we launch a lot of browsers concurrently
      # and therefore the tunnel and Karma need to process a lot of file requests and tests.
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      - init_saucelabs_environment
      - run:
          name: Starting Saucelabs tunnel service
          command: ./tools/saucelabs/sauce-service.sh run
          background: true
      # Add the UMD module tsc compile option so the tests can work
      # properly in the legacy browsers.
      - run: yarn tsc -p packages --module UMD
      - run: yarn tsc -p modules --module UMD
      - run: yarn bazel build //packages/zone.js:npm_package
      # Build test fixtures for tests that rely on Bazel-generated fixtures. Note that disabling
      # specific tests which are reliant on such generated fixtures is not an option as SystemJS
      # in the Saucelabs legacy job always fetches referenced files, even if the imports would be
      # guarded by a check to skip them in the Saucelabs legacy job. We should be fine running such
      # tests in all supported browsers on Saucelabs anyway until this job can be removed.
      - run:
          name: Preparing Bazel-generated fixtures required in legacy tests
          command: |
            yarn bazel build //packages/core/test:downleveled_es5_fixture
            # Needed for the ES5 downlevel reflector test in `packages/core/test/reflection`.
            cp dist/bin/packages/core/test/reflection/es5_downleveled_inheritance_fixture.js \
              dist/all/@angular/core/test/reflection/es5_downleveled_inheritance_fixture.js
      - run:
          # Waiting on `ready` ensures that we don't run tests before the Saucelabs tunnel is ready.
          name: Waiting for Saucelabs tunnel to connect
          command: ./tools/saucelabs/sauce-service.sh ready-wait
      - run:
          name: Running tests on Saucelabs.
          command: |
            browsers=$(node -e 'console.log(require("./browser-providers.conf").sauceAliases.CI_REQUIRED.join(","))')
            yarn karma start ./karma-js.conf.js --single-run --browsers=${browsers}
      - run:
          name: Stop Saucelabs tunnel service
          command: ./tools/saucelabs/sauce-service.sh stop
  # Job that runs all unit tests of the `angular/components` repository.
  components-repo-unit-tests:
    executor:
      name: default-executor
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      # Restore the cache before cloning the repository because the clone script re-uses
      # the restored repository if present. This reduces the number of times the components
      # repository needs to be cloned (this is slow and increases based on commits in the repo).
      - restore_cache:
          keys:
            - *components_repo_unit_tests_cache_key
            # Whenever the `angular/components` SHA is updated, the cache key will no longer
            # match. The fallback cache will still match, and CircleCI will restore the most
            # recently cached repository folder. Without the fallback cache, we'd need to download
            # the repository from scratch and it would slow down the job. This is because we can't
            # clone the repository with reduced `--depth`, but rather need to clone the whole
            # repository to be able to support arbitrary SHAs.
            - *components_repo_unit_tests_cache_key_fallback
      - run:
          name: "Fetching angular/components repository"
          command: ./scripts/ci/clone_angular_components_repo.sh
      - run:
          # Run yarn install to fetch the Bazel binaries as used in the components repo.
          name: Installing dependencies.
          # TODO: remove this once the repo has been updated to use NodeJS v12 and Yarn 1.19.1.
          # We temporarily ignore the "engines" because the Angular components repository has
          # a minimum dependency on NodeJS v12 and Yarn 1.19.1, but the framework repository uses
          # older versions.
          command: yarn --ignore-engines --cwd ${COMPONENTS_REPO_TMP_DIR} install --frozen-lockfile --non-interactive
      - save_cache:
          key: *components_repo_unit_tests_cache_key
          paths:
            # Temporary directory must be kept in sync with the `$COMPONENTS_REPO_TMP_DIR` env
            # variable. It needs to be hardcoded here, because env variable interpolation is
            # not supported.
            - "/tmp/angular-components-repo"
      - run:
          # Updates the `angular/components` `package.json` file to refer to the release output
          # inside the `packages-dist` directory. Note that it's not necessary to perform a yarn
          # install as Bazel runs Yarn automatically when needed.
          name: Setting up release packages.
          command: node scripts/ci/update-deps-to-dist-packages.js ${COMPONENTS_REPO_TMP_DIR}/package.json dist/packages-dist/
      - run:
          name: "Running `angular/components` unit tests"
          command: ./scripts/ci/run_angular_components_unit_tests.sh
  test_zonejs:
    executor:
      name: default-executor
      resource_class: xlarge
    steps:
      - custom_attach_workspace
      - init_environment
      # Install
      - run: yarn --cwd packages/zone.js install --frozen-lockfile --non-interactive
      # Run zone.js tools tests
      - run: yarn --cwd packages/zone.js promisetest
      - run: yarn --cwd packages/zone.js promisefinallytest
      - run: yarn bazel build //packages/zone.js:npm_package &&
          cp dist/bin/packages/zone.js/npm_package/bundles/zone-mix.umd.js ./packages/zone.js/test/extra/ &&
          cp dist/bin/packages/zone.js/npm_package/bundles/zone-patch-electron.umd.js ./packages/zone.js/test/extra/ &&
          yarn --cwd packages/zone.js electrontest
      - run: yarn --cwd packages/zone.js jest:test
      - run: yarn --cwd packages/zone.js jest:nodetest
      - run: yarn --cwd packages/zone.js/test/typings install --frozen-lockfile --non-interactive
      - run: yarn --cwd packages/zone.js/test/typings test
  # Windows jobs
  # Docs: https://circleci.com/docs/2.0/hello-world-windows/
  test_win:
    executor: windows-executor
    steps:
      - setup_win
      - run:
          # Ran into a command parsing problem where `-browser:chromium-local` was converted to
          # `-browser: chromium-local` (a space was added) in https://circleci.com/gh/angular/angular/357511.
          # Probably a powershell command parsing thing. There's no problem using a yarn script though.
          command: yarn circleci-win-ve
          no_output_timeout: 45m
      # Save bazel repository cache to use on subsequent runs.
      # We don't save node_modules because it's faster to use the linux workspace and reinstall.
      - save_cache:
          key: *cache_key_win
          paths:
            - "C:/Users/circleci/bazel_repository_cache"
  test_ivy_aot_win:
    executor: windows-executor
    steps:
      - setup_win
      - run:
          command: yarn circleci-win-ivy
          no_output_timeout: 45m
workflows:
  version: 2
  default_workflow:
    jobs:
      - setup:
          filters:
            branches:
              ignore: g3
      - lint:
          requires:
            - setup
      - test:
          requires:
            - setup
      - test_ivy_aot:
          requires:
            - setup
      - build-npm-packages:
          requires:
            - setup
      - build-ivy-npm-packages:
          requires:
            - setup
      - legacy-unit-tests-saucelabs:
          requires:
            - setup
      - test_aio:
          requires:
            - setup
      - deploy_aio:
          requires:
            - test_aio
      - test_aio_local:
          requires:
            - build-npm-packages
      - test_aio_local:
          name: test_aio_local_viewengine
          viewengine: true
          requires:
            - build-npm-packages
      - test_aio_tools:
          requires:
            - build-npm-packages
      - test_docs_examples:
          requires:
            - build-npm-packages
      - test_docs_examples:
          name: test_docs_examples_viewengine
          viewengine: true
          requires:
            - build-npm-packages
      - aio_preview:
          # Only run on PR builds. (There can be no previews for non-PR builds.)
          <<: *only_on_pull_requests
          requires:
            - setup
      - test_aio_preview:
          requires:
            - aio_preview
      - publish_packages_as_artifacts:
          requires:
            - build-npm-packages
      - publish_snapshot:
          # Note: no filters on this job because we want it to run for all upstream branches.
          # We'd really like to filter out pull requests here, but that's not yet available:
          # https://discuss.circleci.com/t/workflows-pull-request-filter/14396/4
          # Instead, the job just exits immediately at the first step.
          requires:
            # Only publish if tests and integration tests pass
            - test
            - test_ivy_aot
            # Only publish if `aio`/`docs` tests using the locally built Angular packages pass
            - test_aio_local
            - test_aio_local_viewengine
            - test_docs_examples
            - test_docs_examples_viewengine
            # Get the artifacts to publish from the build-packages-dist job
            # since the publishing script expects the legacy outputs layout.
            - build-npm-packages
            - build-ivy-npm-packages
            - legacy-unit-tests-saucelabs
      # Temporarily disabled components-repo-unit-tests to update rules_nodejs to 2.0.0. Breaking changes in
      # rules_nodejs create a dependency sandwich between angular/angular & angular/components that is very
      # difficult and time consuming to resolve and involves patching @angular/bazel in the components repo such
      # as https://github.com/angular/components/commit/9e7ba251207df77164d73d66620e619bcbc4d2ad. It is simpler to
      # 1) land the angular/angular upgrade to rules_nodejs 2.0.0, which has breaking changes,
      # 2) land the angular/components upgrade to rules_nodejs 2.0.0 using the @angular/bazel builds snapshot,
      # 3) update angular/angular to the landed components commit and re-enable these tests.
      # - components-repo-unit-tests:
      #     requires:
      #       - build-npm-packages
      - test_zonejs:
          requires:
            - setup
      # Windows Jobs
      # These are very slow so we run them on non-PRs only for now.
      # TODO: remove the filter when CircleCI makes Windows FS faster.
      # The Windows jobs are only run after their non-windows counterparts finish successfully.
      # This isn't strictly necessary as there is no artifact dependency, but it helps economize
      # CI resources by not attempting to build when we know it should fail.
      - test_win:
          <<: *skip_on_pull_requests
          requires:
            - test
      - test_ivy_aot_win:
          <<: *skip_on_pull_requests
          requires:
            - test_ivy_aot
  monitoring:
    jobs:
      - setup
      - aio_monitoring_stable:
          requires:
            - setup
      - aio_monitoring_next:
          requires:
            - setup
      - saucelabs_ivy:
          # Testing Saucelabs via Bazel is currently taking longer than the legacy Saucelabs job,
          # because each karma_web_test target provisions and tears down browsers, which adds
          # a lot of overhead. Running once daily on master only to avoid wasting resources and
          # slowing down CI for PRs.
          # TODO: Run this job on all branches (including PRs) once karma_web_test targets can
          # share provisioned browsers and we can remove the legacy saucelabs job.
          requires:
            - setup
      - saucelabs_view_engine:
          # Testing Saucelabs via Bazel is currently taking longer than the legacy Saucelabs job,
          # because each karma_web_test target provisions and tears down browsers, which adds
          # a lot of overhead. Running once daily on master only to avoid wasting resources and
          # slowing down CI for PRs.
          # TODO: Run this job on all branches (including PRs) once karma_web_test targets can
          # share provisioned browsers and we can remove the legacy saucelabs job.
          requires:
            - setup
    triggers:
      - schedule:
          <<: *only_on_master
          # Runs monitoring jobs at 10:00AM every day.
          cron: "0 10 * * *"