refactor(core): tweak micro-benchmarks to make them more consistent (#33207)
```
┌────────────────────────────────────┬─────────┬──────┬───────────┬───────────┬───────┐
│ (index)                            │ time    │ unit │ base_time │ base_unit │ %     │
├────────────────────────────────────┼─────────┼──────┼───────────┼───────────┼───────┤
│ directive_instantiate              │ 2.474   │ 'us' │ 2.507     │ 'us'      │ -1.32 │
│ element_text_create                │ 1.313   │ 'us' │ 1.319     │ 'us'      │ -0.45 │
│ interpolation                      │ 220.17  │ 'us' │ 224.217   │ 'us'      │ -1.8  │
│ listeners                          │ 1.988   │ 'us' │ 2.021     │ 'us'      │ -1.63 │
│ map_based_style_and_class_bindings │ 17.908  │ 'ms' │ 18.523    │ 'ms'      │ -3.32 │
│ noop_change_detection              │ 24.851  │ 'us' │ 24.874    │ 'us'      │ -0.09 │
│ property_binding                   │ 218.76  │ 'us' │ 216.736   │ 'us'      │ 0.93  │
│ property_binding_update            │ 443.175 │ 'us' │ 447.686   │ 'us'      │ -1.01 │
│ style_and_class_bindings           │ 1.053   │ 'ms' │ 1.069     │ 'ms'      │ -1.5  │
│ style_binding                      │ 488.154 │ 'us' │ 484.092   │ 'us'      │ 0.84  │
└────────────────────────────────────┴─────────┴──────┴───────────┴───────────┴───────┘
```

PR Close #33207
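For readers scanning the table: the `%` column is the relative change of `time` against `base_time`. A quick way to reproduce it (TypeScript sketch; `percentDelta` is an illustrative helper, not part of the benchmark tooling in this repo):

```ts
// Sketch: the `%` column is (time - base_time) / base_time * 100, rounded to
// two decimals. `percentDelta` is an assumed helper for illustration only.
function percentDelta(time: number, baseTime: number): number {
  return Math.round(((time - baseTime) / baseTime) * 100 * 100) / 100;
}

// e.g. directive_instantiate: 2.474us against a 2.507us baseline
console.log(percentDelta(2.474, 2.507)); // -1.32
```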
parent cc7c2a81df
commit 4800fa1c08
@@ -1,19 +1,19 @@
 ### Build

 ```
-yarn bazel build //packages/core/test/render3/perf:{name}.min_debug.es2015.js --define=compile=aot
+yarn bazel build //packages/core/test/render3/perf:${BENCHMARK}.min_debug.es2015.js --define=compile=aot
 ```

 ### Run

 ```
-node dist/bin/packages/core/test/render3/perf/{name}.min_debug.es2015.js
+node dist/bin/packages/core/test/render3/perf/${BENCHMARK}.min_debug.es2015.js
 ```

 ### Profile

 ```
-node --no-turbo-inlining --inspect-brk dist/bin/packages/core/test/render3/perf/{name}.min_debug.es2015.js
+node --no-turbo-inlining --inspect-brk dist/bin/packages/core/test/render3/perf/${BENCHMARK}.min_debug.es2015.js
 ```

 then connect with a debugger (the `--inspect-brk` option will make sure that benchmark execution doesn't start until a debugger is connected and the code execution is manually resumed).
@@ -64,7 +64,8 @@ The resulting output should look something like this:

 ### Notes

-In all the above commands {name} should be replaced with the actual benchmark (folder) name, ex.:
+In all the above commands `${BENCHMARK}` should be replaced with the actual benchmark (folder) name, ex.:
 - build: `yarn bazel build //packages/core/test/render3/perf:noop_change_detection.min_debug.es2015.js --define=compile=aot`
 - run: `time node dist/bin/packages/core/test/render3/perf/noop_change_detection.min_debug.es2015.js`
 - profile: `node --no-turbo-inlining --inspect-brk dist/bin/packages/core/test/render3/perf/noop_change_detection.min_debug.es2015.js profile`
+- experimenting: `BENCHMARK=noop_change_detection; yarn bazel build //packages/core/test/render3/perf:${BENCHMARK}.min_debug.es2015.js --define=compile=aot && node dist/bin/packages/core/test/render3/perf/${BENCHMARK}.min_debug.es2015.js`
@@ -90,9 +90,7 @@ const createTime = directiveInstantiate('create');

 console.profile('directive_instantiate');
 while (createTime()) {
-  for (let i = 0; i < 50000; i++) {
-    createAndRenderLView(null, embeddedTView, viewTNode);
-  }
+  createAndRenderLView(null, embeddedTView, viewTNode);
 }
 console.profileEnd();

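The hunk above (and the matching one for `element_text_create` right after it) drops the fixed-count inner loop, so each `createTime()` check now guards exactly one `createAndRenderLView` call; the sampling harness, rather than a hard-coded 50 000 or 100 000 iteration count, decides how much work goes into a sample and when to stop. As a rough illustration of that idea, here is a minimal sketch of such a sample function. The names (`createSampleFn`, `SAMPLE_DURATION_MS`, `SAMPLES_NO_IMPROVEMENT`) are made up and only mirror the constants changed in `micro_bench` below; this is not the repo's actual implementation.

```ts
// Minimal sketch of a duration-based sample function -- NOT the repo's
// micro_bench implementation. Constant names only mirror the ones changed
// in the last hunk of this commit.
import {performance} from 'perf_hooks';

const SAMPLE_DURATION_MS = 10;       // cf. MIN_SAMPLE_DURATION
const SAMPLES_NO_IMPROVEMENT = 100;  // cf. MIN_SAMPLE_COUNT_NO_IMPROVEMENT

function createSampleFn(): () => boolean {
  let sampleStart = performance.now();
  let opsInSample = 0;               // operations executed in the current sample
  let bestMsPerOp = Infinity;
  let samplesWithoutImprovement = 0;

  // The caller runs exactly one benchmarked operation per `true` returned:
  //   const sample = createSampleFn();
  //   while (sample()) { doOneUnitOfWork(); }
  return function sample(): boolean {
    const elapsed = performance.now() - sampleStart;
    if (elapsed < SAMPLE_DURATION_MS || opsInSample === 0) {
      opsInSample++;                 // keep the current sample going
      return true;
    }

    // Close the current sample and check whether the best time still improves.
    const msPerOp = elapsed / opsInSample;
    if (msPerOp < bestMsPerOp) {
      bestMsPerOp = msPerOp;
      samplesWithoutImprovement = 0;
    } else if (++samplesWithoutImprovement >= SAMPLES_NO_IMPROVEMENT) {
      console.log(`best: ${bestMsPerOp.toFixed(6)} ms per operation`);
      return false;                  // result looks stable -> stop benchmarking
    }

    // Start the next sample; the upcoming operation belongs to it.
    sampleStart = performance.now();
    opsInSample = 1;
    return true;
  };
}
```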
@@ -81,9 +81,7 @@ const createTime = elementTextCreate('create');

 console.profile('element_text_create');
 while (createTime()) {
-  for (let i = 0; i < 100000; i++) {
-    createAndRenderLView(null, embeddedTView, viewTNode);
-  }
+  createAndRenderLView(null, embeddedTView, viewTNode);
 }
 console.profileEnd();

@@ -7,8 +7,11 @@
  */
 const performance = require('perf_hooks').performance;

-const MIN_SAMPLE_COUNT_NO_IMPROVEMENT = 30;
-const MIN_SAMPLE_DURATION = 100;
+// Higher number here makes it more likely that we are more sure of the result.
+const MIN_SAMPLE_COUNT_NO_IMPROVEMENT = 100;
+// A smaller number here means that we are coming too close to the timer resolution, but it also
+// means that it is less likely that we will be bothered by GC or preemptive multitasking.
+const MIN_SAMPLE_DURATION = 10;

 const UNITS = ['ms', 'us', 'ns', 'ps'];
 export interface Benchmark {
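The `UNITS = ['ms', 'us', 'ns', 'ps']` constant in the hunk above hints at how the `time`/`unit` pairs in the summary table are produced: a raw per-operation time in milliseconds is scaled by 1000 until it lands in a readable range. A hedged sketch of that formatting step (the `formatTime` helper is assumed for illustration; the actual formatting code is not part of this diff):

```ts
// Sketch: scale a per-operation time (in ms) into a readable unit.
// UNITS mirrors the constant shown in the hunk above; `formatTime` itself
// is an assumed helper, not code from this repo.
const UNITS = ['ms', 'us', 'ns', 'ps'];

function formatTime(msPerOp: number): {time: number; unit: string} {
  let value = msPerOp;
  let unitIndex = 0;
  // Move to the next smaller unit while the value is below 1
  // (e.g. 0.002474 ms -> 2.474 us).
  while (value < 1 && unitIndex < UNITS.length - 1) {
    value *= 1000;
    unitIndex++;
  }
  return {time: Math.round(value * 1000) / 1000, unit: UNITS[unitIndex]};
}

console.log(formatTime(0.002474)); // { time: 2.474, unit: 'us' }
console.log(formatTime(17.908));   // { time: 17.908, unit: 'ms' }
```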