From e3cde53d641b720ccc187177583b62719873b740 Mon Sep 17 00:00:00 2001
From: Anton Tayanovskyy
Date: Mon, 12 Feb 2024 16:21:36 -0500
Subject: [PATCH] Do not run benchmark prewarm in parallel with the benchmark (#1584)

Looks like ProgramTest defaults to enabling t.Parallel(), and this defeats
the purpose of the prewarm phase in provider benchmarks. The intent of
prewarm was to do a no-op run that downloads dependencies up front, so that
overhead is not measured as part of the benchmark. That was not happening,
though, because prewarm was running in parallel with the actual benchmark.

After this change, each individual benchmark first does an unmeasured
prewarm and then does the measurement.

In the future we may want to measure prewarm as well, since it is also
interesting for user experience.
---
 misc/test/performance_test.go | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/misc/test/performance_test.go b/misc/test/performance_test.go
index 2529be9b4..5a959e6e3 100644
--- a/misc/test/performance_test.go
+++ b/misc/test/performance_test.go
@@ -230,6 +230,7 @@ func programTestAsBenchmark(
 			SkipUpdate:               true,
 			AllowEmptyPreviewChanges: true,
 			AllowEmptyUpdateChanges:  true,
+			NoParallel:               true,
 		})
 		prewarmOptions.ExtraRuntimeValidation = nil
 		integration.ProgramTest(t, &prewarmOptions)
@@ -237,7 +238,9 @@ func programTestAsBenchmark(
 
 	// Run with --tracing to record measured data.
 	t.Run("benchmark", func(t *testing.T) {
-		finalOptions := test.With(bench.ProgramTestOptions())
+		finalOptions := test.With(bench.ProgramTestOptions()).With(integration.ProgramTestOptions{
+			NoParallel: true,
+		})
 		integration.ProgramTest(t, &finalOptions)
 	})
 }
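
Note for readers of the patch: the sketch below shows, in isolation, the prewarm-then-measure
structure this change produces. It is an illustration rather than the repository code: the helper
name benchmarkProgram is hypothetical, only ProgramTestOptions fields that appear in the diff are
used, and the import path assumes Pulumi's v3 integration test framework.

package test

import (
	"testing"

	"github.com/pulumi/pulumi/pkg/v3/testing/integration"
)

// benchmarkProgram (hypothetical helper) runs an unmeasured prewarm pass and
// then the measured pass. Both subtests set NoParallel so that ProgramTest
// does not call t.Parallel(), which keeps the two phases strictly sequential.
func benchmarkProgram(t *testing.T, base integration.ProgramTestOptions) {
	// Prewarm: a cheap pass whose only job is to download plugins and
	// dependencies so the benchmark below does not measure that overhead.
	t.Run("prewarm", func(t *testing.T) {
		prewarm := base.With(integration.ProgramTestOptions{
			SkipUpdate:               true,
			AllowEmptyPreviewChanges: true,
			AllowEmptyUpdateChanges:  true,
			NoParallel:               true, // run serially, before the benchmark
		})
		prewarm.ExtraRuntimeValidation = nil
		integration.ProgramTest(t, &prewarm)
	})

	// Benchmark: the measured pass, also serialized so it starts only after
	// the prewarm subtest has finished.
	t.Run("benchmark", func(t *testing.T) {
		final := base.With(integration.ProgramTestOptions{
			NoParallel: true,
		})
		integration.ProgramTest(t, &final)
	})
}

With NoParallel set on both subtests, the t.Run calls execute one after the other, so the measured
phase can only begin once the prewarm phase has completed.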