Skip to content

Commit

Permalink
add benchmark script and testing (#158)
Browse files Browse the repository at this point in the history
* add benchmark script and testing

* add compat bounds

* update CompatHelper

* ignore benchmark for CI

* format

* adjust test values

* soften tolerances

* WIP: try include instead of trixi_include

* uncomment all elixirs again

* fix typo

* instantiate

* don't clutter terminal

* don't activate in benchmarks.jl

* add README

* move develop after activate

* use @__MODULE__ in trixi_include

* test to include plot in comment

* Apply suggestions from code review

Co-authored-by: Hendrik Ranocha <[email protected]>

* use braces

* return to original version

* try embedding link to artifact

---------

Co-authored-by: Hendrik Ranocha <[email protected]>
  • Loading branch information
JoshuaLampert and ranocha authored Oct 29, 2024
1 parent 5734033 commit 17a3d77
Show file tree
Hide file tree
Showing 10 changed files with 147 additions and 7 deletions.
4 changes: 4 additions & 0 deletions .github/workflows/CI.yml
Original file line number Diff line number Diff line change
Expand Up @@ -9,23 +9,27 @@ on:
- 'LICENSE.md'
- 'README.md'
- '.zenodo.json'
- '.github/workflows/benchmark.yml'
- '.github/workflows/CompatHelper.yml'
- '.github/workflows/Documenter.yml'
- '.github/workflows/Format-check.yml'
- '.github/workflows/TagBot.yml'
- '.github/workflows/SpellCheck.yml'
- 'benchmark/**'
- 'docs/**'
pull_request:
paths-ignore:
- 'CITATION.bib'
- 'LICENSE.md'
- 'README.md'
- '.zenodo.json'
- '.github/workflows/benchmark.yml'
- '.github/workflows/CompatHelper.yml'
- '.github/workflows/Documenter.yml'
- '.github/workflows/Format-check.yml'
- '.github/workflows/TagBot.yml'
- '.github/workflows/SpellCheck.yml'
- 'benchmark/**'
- 'docs/**'
workflow_dispatch:

Expand Down
2 changes: 1 addition & 1 deletion .github/workflows/CompatHelper.yml
Original file line number Diff line number Diff line change
Expand Up @@ -13,4 +13,4 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
COMPATHELPER_PRIV: ${{ secrets.DOCUMENTER_KEY }}
run: julia -e 'using CompatHelper; CompatHelper.main(; subdirs=["", "docs", "test"])'
run: julia -e 'using CompatHelper; CompatHelper.main(; subdirs=["", "docs", "test", "benchmark"])'
4 changes: 4 additions & 0 deletions .github/workflows/Documenter.yml
Original file line number Diff line number Diff line change
Expand Up @@ -7,15 +7,19 @@ on:
tags: '*'
paths-ignore:
- '.zenodo.json'
- '.github/workflows/benchmark.yml'
- '.github/workflows/CI.yml'
- '.github/workflows/CompatHelper.yml'
- '.github/workflows/TagBot.yml'
- 'benchmark/**'
pull_request:
paths-ignore:
- '.zenodo.json'
- '.github/workflows/benchmark.yml'
- '.github/workflows/CI.yml'
- '.github/workflows/CompatHelper.yml'
- '.github/workflows/TagBot.yml'
- 'benchmark/**'
workflow_dispatch:

concurrency:
Expand Down
2 changes: 2 additions & 0 deletions .github/workflows/Downgrade.yml
Original file line number Diff line number Diff line change
Expand Up @@ -8,11 +8,13 @@ on:
- 'LICENSE.md'
- 'README.md'
- '.zenodo.json'
- '.github/workflows/benchmark.yml'
- '.github/workflows/CompatHelper.yml'
- '.github/workflows/Documenter.yml'
- '.github/workflows/Format-check.yml'
- '.github/workflows/TagBot.yml'
- '.github/workflows/SpellCheck.yml'
- 'benchmark/**'
- 'docs/**'
workflow_dispatch:

Expand Down
78 changes: 78 additions & 0 deletions .github/workflows/benchmark.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,78 @@
name: Benchmark a Pull Request

on:
pull_request:
branches:
- main

permissions:
pull-requests: write

jobs:
benchmark:
runs-on: ubuntu-latest

steps:
- uses: actions/checkout@v4
- uses: julia-actions/setup-julia@v2
with:
version: "1.10"
- uses: julia-actions/cache@v2
- name: Extract Package Name from Project.toml
id: extract-package-name
run: |
PACKAGE_NAME=$(grep "^name" Project.toml | sed 's/^name = "\(.*\)"$/\1/')
echo "package_name=$PACKAGE_NAME" >> $GITHUB_OUTPUT
- name: Build AirspeedVelocity
env:
JULIA_NUM_THREADS: 2
run: |
# Lightweight build step, as sometimes the runner runs out of memory:
julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.add("AirspeedVelocity")'
julia -e 'ENV["JULIA_PKG_PRECOMPILE_AUTO"]=0; import Pkg; Pkg.build("AirspeedVelocity")'
- name: Add ~/.julia/bin to PATH
run: |
echo "$HOME/.julia/bin" >> $GITHUB_PATH
- name: Run benchmarks
run: |
echo $PATH
ls -l ~/.julia/bin
mkdir results
benchpkg ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --url=${{ github.event.repository.clone_url }} --bench-on="${{github.event.pull_request.head.sha}}" --output-dir=results/ --tune
- name: Create plots from benchmarks
run: |
mkdir -p plots
benchpkgplot ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --npart=10 --format=png --input-dir=results/ --output-dir=plots/
- name: Upload plot as artifact
id: artifact-upload-step
uses: actions/upload-artifact@v4
with:
name: plots
path: plots
- name: Create markdown table from benchmarks
run: |
benchpkgtable ${{ steps.extract-package-name.outputs.package_name }} --rev="${{github.event.repository.default_branch}},${{github.event.pull_request.head.sha}}" --input-dir=results/ --ratio > table.md
echo '### Benchmark Results' > body.md
echo '' >> body.md
echo '' >> body.md
cat table.md >> body.md
echo '' >> body.md
echo '' >> body.md
echo '### Benchmark Plots' >> body.md
echo 'A plot of the benchmark results have been uploaded as an artifact to the workflow run for this PR.' >> body.md
echo '![Benchmark Plot](${{ steps.artifact-upload-step.outputs.artifact-url }})' >> body.md
- name: Find Comment
uses: peter-evans/find-comment@v3
id: fcbenchmark
with:
issue-number: ${{ github.event.pull_request.number }}
comment-author: 'github-actions[bot]'
body-includes: Benchmark Results

- name: Comment on PR
uses: peter-evans/create-or-update-comment@v4
with:
comment-id: ${{ steps.fcbenchmark.outputs.comment-id }}
issue-number: ${{ github.event.pull_request.number }}
body-path: body.md
edit-mode: replace
9 changes: 9 additions & 0 deletions benchmark/Project.toml
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
OrdinaryDiffEq = "1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"
SummationByPartsOperators = "9f78cca6-572e-554e-b819-917d2f1cf240"

[compat]
BenchmarkTools = "1"
OrdinaryDiffEq = "6.49.1"
SummationByPartsOperators = "0.5.63"
6 changes: 6 additions & 0 deletions benchmark/README.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
# Benchmarks

This directory contains some benchmark setups using [BenchmarkTools.jl](https://github.com/JuliaCI/BenchmarkTools.jl).
The file `benchmarks.jl` is run by a GitHub Action leveraging [AirspeedVelocity.jl](https://github.com/MilesCranmer/AirspeedVelocity.jl)
to generate a report on the performance of the package for each pull request. If you want to run the benchmarks locally, you can do so by running
`run_benchmarks.jl`, which returns a summary `results` of the benchmark results.
28 changes: 28 additions & 0 deletions benchmark/benchmarks.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,28 @@
using BenchmarkTools
using DispersiveShallowWater

const SUITE = BenchmarkGroup()

elixirs = [joinpath(examples_dir(), "bbm_1d", "bbm_1d_basic.jl"),
joinpath(examples_dir(), "bbm_1d", "bbm_1d_fourier.jl"),
joinpath(examples_dir(), "bbm_bbm_1d", "bbm_bbm_1d_dg.jl"),
joinpath(examples_dir(), "bbm_bbm_1d", "bbm_bbm_1d_relaxation.jl"),
joinpath(examples_dir(), "bbm_bbm_1d", "bbm_bbm_1d_upwind_relaxation.jl"),
joinpath(examples_dir(), "bbm_bbm_1d", "bbm_bbm_1d_basic_reflecting.jl"),
joinpath(examples_dir(), "hyperbolic_serre_green_naghdi_1d",
"hyperbolic_serre_green_naghdi_dingemans.jl"),
joinpath(examples_dir(), "serre_green_naghdi_1d",
"serre_green_naghdi_well_balanced.jl"),
joinpath(examples_dir(), "svaerd_kalisch_1d",
"svaerd_kalisch_1d_dingemans_relaxation.jl")]

for elixir in elixirs
benchname = joinpath(basename(dirname(elixir)), basename(elixir))
println("Running $benchname...")
redirect_stdout(devnull) do
trixi_include(@__MODULE__, elixir, tspan = (0.0, 1e-10))
end
SUITE[benchname] = @benchmarkable DispersiveShallowWater.rhs!($(similar(sol.u[end])),
$(copy(sol.u[end])),
$(semi), $(first(tspan)))
end
8 changes: 8 additions & 0 deletions benchmark/run_benchmarks.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,8 @@
using Pkg
Pkg.activate(@__DIR__)
Pkg.develop(PackageSpec(path = dirname(@__DIR__)))
Pkg.instantiate()

include("benchmarks.jl")
tune!(SUITE)
result = run(SUITE)
13 changes: 7 additions & 6 deletions test/test_serre_green_naghdi_1d.jl
Original file line number Diff line number Diff line change
Expand Up @@ -273,10 +273,10 @@ end
l2=[1.3655498085989206, 2.3967486930606716, 0.0],
linf=[1.001076318001934, 0.8052527556023067, 0.0],
cons_error=[0.0, 0.0002674927404067162, 0.0],
change_entropy=-0.0584189861183404,
change_entropy_modified=0.059273537492344985,
atol=1e-11, # to make CI pass
atol_ints=4e-9) # to make CI pass
change_entropy=-0.05841897226287074,
change_entropy_modified=0.059273551933074486,
atol_ints=2e-8, # to make CI pass
atol=2e-8) # to make CI pass

@test_allocations(semi, sol, allocs=900_000)
end
Expand All @@ -291,8 +291,9 @@ end
l2=[1.3655493671985637, 2.3967828251339003, 0.0],
linf=[1.001075913983051, 0.8052680970114169, 0.0],
cons_error=[1.1368683772161603e-13, 0.00026407261543415217, 0.0],
change_entropy=-0.058352284294869605,
change_entropy_modified=0.05927339747017868)
change_entropy=-0.058352273553509804,
change_entropy_modified=0.05927340849780194,
atol_ints=2e-8) # to make CI pass

@test_allocations(semi, sol, allocs=900_000)
end

0 comments on commit 17a3d77

Please sign in to comment.