diff --git a/packages/core/test/render3/perf/README.md b/packages/core/test/render3/perf/README.md
index ab79a0bb53..eb3dac46af 100644
--- a/packages/core/test/render3/perf/README.md
+++ b/packages/core/test/render3/perf/README.md
@@ -1,19 +1,19 @@
 ### Build
 
 ```
-yarn bazel build //packages/core/test/render3/perf:{name}.min_debug.es2015.js --define=compile=aot
+yarn bazel build //packages/core/test/render3/perf:${BENCHMARK}.min_debug.es2015.js --define=compile=aot
 ```
 
 ### Run
 
 ```
-node dist/bin/packages/core/test/render3/perf/{name}.min_debug.es2015.js
+node dist/bin/packages/core/test/render3/perf/${BENCHMARK}.min_debug.es2015.js
 ```
 
 ### Profile
 
 ```
-node --no-turbo-inlining --inspect-brk dist/bin/packages/core/test/render3/perf/{name}.min_debug.es2015.js
+node --no-turbo-inlining --inspect-brk dist/bin/packages/core/test/render3/perf/${BENCHMARK}.min_debug.es2015.js
 ```
 
 then connect with a debugger (the `--inspect-brk` option will make sure that benchmark execution doesn't start until a debugger is connected and the code execution is manually resumed).
@@ -64,7 +64,8 @@ The resulting output should look something like this:
 
 ### Notes
 
-In all the above commands {name} should be replaced with the actual benchmark (folder) name, ex.:
+In all the above commands `${BENCHMARK}` should be replaced with the actual benchmark (folder) name, e.g.:
 - build: `yarn bazel build //packages/core/test/render3/perf:noop_change_detection.min_debug.es2015.js --define=compile=aot`
 - run: `time node dist/bin/packages/core/test/render3/perf/noop_change_detection.min_debug.es2015.js`
 - profile: `node --no-turbo-inlining --inspect-brk dist/bin/packages/core/test/render3/perf/noop_change_detection.min_debug.es2015.js profile`
+- experimenting: `BENCHMARK=noop_change_detection; yarn bazel build //packages/core/test/render3/perf:${BENCHMARK}.min_debug.es2015.js --define=compile=aot && node dist/bin/packages/core/test/render3/perf/${BENCHMARK}.min_debug.es2015.js`
diff --git a/packages/core/test/render3/perf/directive_instantiate/index.ts b/packages/core/test/render3/perf/directive_instantiate/index.ts
index 25d65d2a64..ab60d58241 100644
--- a/packages/core/test/render3/perf/directive_instantiate/index.ts
+++ b/packages/core/test/render3/perf/directive_instantiate/index.ts
@@ -90,9 +90,7 @@ const createTime = directiveInstantiate('create');
 
 console.profile('directive_instantiate');
 while (createTime()) {
-  for (let i = 0; i < 50000; i++) {
-    createAndRenderLView(null, embeddedTView, viewTNode);
-  }
+  createAndRenderLView(null, embeddedTView, viewTNode);
 }
 console.profileEnd();
 
diff --git a/packages/core/test/render3/perf/element_text_create/index.ts b/packages/core/test/render3/perf/element_text_create/index.ts
index d9041a0cb3..43c2e870a5 100644
--- a/packages/core/test/render3/perf/element_text_create/index.ts
+++ b/packages/core/test/render3/perf/element_text_create/index.ts
@@ -81,9 +81,7 @@ const createTime = elementTextCreate('create');
 
 console.profile('element_text_create');
 while (createTime()) {
-  for (let i = 0; i < 100000; i++) {
-    createAndRenderLView(null, embeddedTView, viewTNode);
-  }
+  createAndRenderLView(null, embeddedTView, viewTNode);
 }
 console.profileEnd();
 
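Note: the two `index.ts` changes above remove the hard-coded inner repeat loops (50000 and 100000 iterations) so that the `while (createTime())` harness loop alone decides how many times the measured unit runs. The following self-contained sketch illustrates that calling pattern; `makeProfiler`, the 100 ms budget, and the stub work function are hypothetical stand-ins, not the actual `micro_bench.ts` API:

```typescript
// Hypothetical sketch of a harness-driven benchmark loop. The profile
// function keeps returning true until its measurement budget is exhausted,
// so the caller no longer needs a hard-coded `for` loop around the work.
function makeProfiler(budgetMs: number): () => boolean {
  const start = Date.now();
  return () => Date.now() - start < budgetMs;
}

// Stand-in for the measured unit (createAndRenderLView in the real benchmarks).
let viewsCreated = 0;
function createAndRenderLViewStub(): void {
  viewsCreated++;
}

const createTime = makeProfiler(100);  // assumed 100 ms measurement budget
while (createTime()) {
  // Exactly one unit of work per harness-controlled iteration.
  createAndRenderLViewStub();
}
console.log('iterations completed:', viewsCreated);
```

Keeping the loop body to a single unit of work lets the harness attribute time per operation directly, instead of dividing by an arbitrary inner loop count.
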
diff --git a/packages/core/test/render3/perf/micro_bench.ts b/packages/core/test/render3/perf/micro_bench.ts
index d727935eab..f3c7940079 100644
--- a/packages/core/test/render3/perf/micro_bench.ts
+++ b/packages/core/test/render3/perf/micro_bench.ts
@@ -7,8 +7,11 @@
  */
const performance = require('perf_hooks').performance;
 
-const MIN_SAMPLE_COUNT_NO_IMPROVEMENT = 30;
-const MIN_SAMPLE_DURATION = 100;
+// A higher number here makes it more likely that we can be confident in the result.
+const MIN_SAMPLE_COUNT_NO_IMPROVEMENT = 100;
+// A smaller number here brings each sample closer to the timer resolution, but it also means
+// that a sample is less likely to be disturbed by GC or preemptive multitasking.
+const MIN_SAMPLE_DURATION = 10;
 const UNITS = ['ms', 'us', 'ns', 'ps'];
 
 export interface Benchmark {
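To make the trade-off described in those comments concrete, here is a self-contained sketch of how the two constants could drive a sampling loop. The real `micro_bench.ts` implementation differs; the function shape, names, and termination rule below are assumptions for illustration only:

```typescript
import {performance} from 'perf_hooks';

const MIN_SAMPLE_COUNT_NO_IMPROVEMENT = 100;
const MIN_SAMPLE_DURATION = 10;  // ms

// Illustrative only: time `work` repeatedly until the best time per iteration
// has not improved for MIN_SAMPLE_COUNT_NO_IMPROVEMENT consecutive samples.
function microBenchSketch(work: () => void): number {
  let bestTimePerIteration = Infinity;
  let samplesWithNoImprovement = 0;
  while (samplesWithNoImprovement < MIN_SAMPLE_COUNT_NO_IMPROVEMENT) {
    // Each sample runs enough iterations to span at least MIN_SAMPLE_DURATION
    // ms: long enough to stay above the timer resolution, short enough that a
    // GC pause or OS preemption is unlikely to land inside any one sample.
    let iterations = 0;
    const start = performance.now();
    let elapsed = 0;
    while (elapsed < MIN_SAMPLE_DURATION) {
      work();
      iterations++;
      elapsed = performance.now() - start;
    }
    const timePerIteration = elapsed / iterations;
    if (timePerIteration < bestTimePerIteration) {
      bestTimePerIteration = timePerIteration;
      samplesWithNoImprovement = 0;
    } else {
      samplesWithNoImprovement++;
    }
  }
  return bestTimePerIteration;  // ms per iteration
}

// Usage: measure a trivial unit of work.
let acc = 0;
console.log(microBenchSketch(() => { acc += Math.sqrt(acc + 1); }), 'ms/iteration');
```

Under this reading, raising MIN_SAMPLE_COUNT_NO_IMPROVEMENT from 30 to 100 demands more consecutive non-improving samples before a result is accepted, while dropping MIN_SAMPLE_DURATION from 100 ms to 10 ms shortens each sample so fewer of them are contaminated by pauses outside the benchmark's control.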