Bringing the .github/workflows/postbench.yml to master (#350)
* First try on benchmark CI (#315)

(WIP)
TODO: store results and PR label triggering

* Test consecutive PR (#346)

Merge all new advancements from the `benchx` branch in order to open a consecutive follow-up PR that tests GitHub Actions

* Trivial change, test PR (#347)

* Trivial change, test PR

* Add PR write permission

* Test full permissions

* Granular permissions

* try write-all

* try label event

* labeled in pull_request

* Store results as artifact and trigger a consecutive workflow_run

* Workflow chain almost complete (WIP)

* correct .benchmarkci path

* Add benchx support branch

* Just try pull_request_target instead

* Remove target

* Get rid of s to get the chain going

---------

Co-authored-by: Guillaume Dalle <[email protected]>

* Simplify for master merge

* Fixed formatting

* Integrate review comments

---------

Co-authored-by: Guillaume Dalle <[email protected]>
filchristou and gdalle authored Mar 15, 2024
1 parent d1081b0 commit e773bce
Showing 18 changed files with 914 additions and 165 deletions.
61 changes: 61 additions & 0 deletions .github/workflows/postbench.yml
@@ -0,0 +1,61 @@
name: PostBenchmarks
on:
  workflow_run:
    workflows: [RunUploadBenchmarks]
    types: [completed]
    branches: [master, benchx]
jobs:
  on-success:
    runs-on: ubuntu-latest
    # if: ${{ github.event.workflow_run.conclusion == 'success' }}
    steps:
      - run: echo 'The triggering workflow passed'
      - name: 'Download artifact'
        uses: actions/github-script@v6
        with:
          script: |
            let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
              owner: context.repo.owner,
              repo: context.repo.repo,
              run_id: context.payload.workflow_run.id,
            });
            let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
              return artifact.name == "BenchmarkResults"
            })[0];
            let download = await github.rest.actions.downloadArtifact({
              owner: context.repo.owner,
              repo: context.repo.repo,
              artifact_id: matchArtifact.id,
              archive_format: 'zip',
            });
            let fs = require('fs');
            fs.writeFileSync(`${process.env.GITHUB_WORKSPACE}/BenchmarkResults.zip`, Buffer.from(download.data));
      - name: 'Unzip artifact'
        run: unzip BenchmarkResults.zip -d .benchmarkci/
      - name: 'Post results test 1'
        run: julia -e 'using BenchmarkCI; BenchmarkCI.postjudge()'
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: 'Comment on PR test 2'
        uses: actions/github-script@v6
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          script: |
            let fs = require('fs');
            let issue_number = Number(fs.readFileSync('./pr_number'));
            await github.rest.issues.createComment({
              owner: context.repo.owner,
              repo: context.repo.repo,
              issue_number: issue_number,
              body: 'Thank you for the PR!'
            });
      - name: 'Comment on PR test 3'
        run: gh pr comment "$NUMBER" --body "$BODY"
        env:
          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          GH_REPO: ${{ github.repository }}
          NUMBER: ${{ github.event.pull_request.number }}
          BODY: >
            This pr comment is testing.
            **This should be bold**
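For context, the chain above works because the judgement produced by the benchmark run ships inside the `BenchmarkResults` artifact and is unpacked into `.benchmarkci/` before `BenchmarkCI.postjudge()` runs. A minimal local sketch (not part of the commit, and assuming BenchmarkCI's documented `displayjudgement` helper behaves as described) of inspecting that same data without posting a comment:

```julia
# Sketch only: inspect a downloaded judgement locally instead of posting it.
# Assumes the BenchmarkResults artifact was unzipped into .benchmarkci/,
# mirroring the 'Unzip artifact' step above.
using BenchmarkCI

# Print the stored comparison that postjudge() would turn into a PR comment.
BenchmarkCI.displayjudgement()

# The PR number travels with the artifact (written by the benchmark workflow);
# the exact location depends on how the artifact was unpacked.
pr_number = parse(Int, read(".benchmarkci/pr_number", String))
@show pr_number
```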
61 changes: 61 additions & 0 deletions .github/workflows/runbenchandupload.yml
@@ -0,0 +1,61 @@
name: RunUploadBenchmarks
env:
  JULIA_NUM_THREADS: auto
on:
  pull_request:
    types: [labeled]
    branches:
      - master
      - benchx
jobs:
  benchmark:
    if: ${{ github.event.label.name == 'to-benchmark' }}
    name: Julia ${{ matrix.version }} - ${{ matrix.os }} - ${{ matrix.arch }} - ${{ github.event_name }}
    runs-on: ${{ matrix.os }}
    continue-on-error: ${{ matrix.allow_failure }}
    strategy:
      fail-fast: false
      matrix:
        version:
          - '1'
        os:
          - ubuntu-latest
        arch:
          - x64
        include:
          - version: '1'
            allow_failure: false
    steps:
      - uses: actions/checkout@v4
      - uses: julia-actions/setup-julia@latest
        with:
          version: ${{ matrix.version }}
          arch: ${{ matrix.arch }}
      - uses: actions/cache@v3
        env:
          cache-name: cache-artifacts
        with:
          path: ~/.julia/artifacts
          key: ${{ runner.os }}-test-${{ env.cache-name }}-${{ hashFiles('**/Project.toml') }}
          restore-keys: |
            ${{ runner.os }}-test-${{ env.cache-name }}-
            ${{ runner.os }}-test-
            ${{ runner.os }}-
      - uses: julia-actions/julia-buildpkg@latest
      - name: install dependencies
        run: julia -e 'using Pkg; pkg"add PkgBenchmark [email protected]"'
      - name: Run benchmark judge
        run: julia -e "
          using BenchmarkCI, PkgBenchmark;
          jd=BenchmarkCI.judge(baseline=\"origin/${GITHUB_BASE_REF}\");
          "
      - name: Save PR number
        env:
          PR_NUMBER: ${{ github.event.pull_request.number }}
        run: |
          echo $PR_NUMBER > .benchmarkci/pr_number
      - uses: actions/upload-artifact@v3
        with:
          name: BenchmarkResults
          path: .benchmarkci/
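The `Run benchmark judge` step can be reproduced locally before applying the `to-benchmark` label. A rough sketch (not part of the commit), with `origin/master` standing in for `${GITHUB_BASE_REF}`:

```julia
# Sketch only: run the same comparison the CI job performs, from the repo root.
# Requires PkgBenchmark and BenchmarkCI in the active environment.
using BenchmarkCI, PkgBenchmark

# Benchmark the current checkout against the PR's target branch; results are
# written under .benchmarkci/, the directory later uploaded as the artifact.
jd = BenchmarkCI.judge(baseline="origin/master")
```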
2 changes: 2 additions & 0 deletions .gitignore
@@ -6,6 +6,8 @@ docs/build/
 docs/site/
 benchmark/.results/*
 benchmark/.tune.jld
+benchmark/Manifest.toml
+.benchmarkci
 *.cov
 /Manifest.toml
 /docs/Manifest.toml
7 changes: 7 additions & 0 deletions benchmark/Project.toml
@@ -0,0 +1,7 @@
[deps]
BenchmarkTools = "6e4b80f9-dd63-53aa-95a3-0cdb28fa8baf"
Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"

[compat]
BenchmarkTools = "1.5"
32 changes: 24 additions & 8 deletions benchmark/benchmarks.jl
@@ -1,18 +1,34 @@
-using BenchmarkTools
-using Graphs
+using BenchmarkTools, Graphs
 
-DIGRAPHS = Dict{String,DiGraph}(
+const BENCHDIR = dirname(@__FILE__)
+
+const DIGRAPHS = Dict{String,DiGraph}(
     "complete100" => complete_digraph(100), "path500" => path_digraph(500)
 )
 
-GRAPHS = Dict{String,Graph}(
+const GRAPHS = Dict{String,Graph}(
     "complete100" => complete_graph(100),
     "tutte" => smallgraph(:tutte),
     "path500" => path_graph(500),
 )
 
-suite = BenchmarkGroup()
-include("core.jl")
+serialbenchmarks = [
+    "serial/core.jl",
+    "serial/connectivity.jl",
+    "serial/centrality.jl",
+    "serial/edges.jl",
+    "serial/insertions.jl",
+    "serial/traversals.jl",
+]
+
+const SUITE = BenchmarkGroup()
+
+foreach(serialbenchmarks) do bm
+    include(bm)
+end
+
+parallelbenchmarks = ["parallel/egonets.jl"]
 
-tune!(suite);
-results = run(suite; verbose=true, seconds=10)
+foreach(parallelbenchmarks) do bm
+    include(joinpath(BENCHDIR, bm))
+end
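With `benchmarks.jl` now defining a top-level `SUITE`, the suite can be exercised locally either through PkgBenchmark or directly with BenchmarkTools, much as the removed `tune!`/`run` lines used to do. A sketch (not part of the commit):

```julia
# Sketch only: two ways to run the restructured benchmark suite locally.
using BenchmarkTools, PkgBenchmark

# Option 1: let PkgBenchmark load benchmark/benchmarks.jl and run SUITE
# (assumes the Graphs package is available in the active environment).
results = benchmarkpkg("Graphs")

# Option 2: drive it directly, mirroring the old script's tune!/run calls.
include(joinpath("benchmark", "benchmarks.jl"))  # defines SUITE
tune!(SUITE)
run(SUITE; verbose=true, seconds=10)
```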
26 changes: 0 additions & 26 deletions benchmark/centrality.jl

This file was deleted.

14 changes: 0 additions & 14 deletions benchmark/connectivity.jl

This file was deleted.

39 changes: 0 additions & 39 deletions benchmark/core.jl

This file was deleted.

4 changes: 0 additions & 4 deletions benchmark/insertions.jl

This file was deleted.

97 changes: 46 additions & 51 deletions benchmark/parallel/egonets.jl
@@ -1,62 +1,57 @@
 using Graphs
 using BenchmarkTools
 @show Threads.nthreads()
 
-@benchgroup "parallel" begin
-    @benchgroup "egonet" begin
-        function vertex_function(g::Graph, i::Int)
-            a = 0
-            for u in neighbors(g, i)
-                a += degree(g, u)
-            end
-            return a
-        end
 
-        function twohop(g::Graph, i::Int)
-            a = 0
-            for u in neighbors(g, i)
-                for v in neighbors(g, u)
-                    a += degree(g, v)
-                end
-            end
-            return a
-        end
 
-        function mapvertices(f, g::Graph)
-            n = nv(g)
-            a = zeros(Int, n)
-            Threads.@threads for i in 1:n
-                a[i] = f(g, i)
-            end
-            return a
-        end
+SUITE["parallel"] = BenchmarkGroup([], "egonet" => BenchmarkGroup([]))
 
-        function mapvertices_single(f, g)
-            n = nv(g)
-            a = zeros(Int, n)
-            for i in 1:n
-                a[i] = f(g, i)
-            end
-            return a
-        end
+SUITE["serial"] = BenchmarkGroup([], "egonet" => BenchmarkGroup([]))
 
-        function comparison(f, g)
-            println("Mulithreaded on $(Threads.nthreads())")
-            b1 = @benchmarkable mapvertices($f, $g)
-            println(b1)
+function vertex_function(g::Graph, i::Int)
+    a = 0
+    for u in neighbors(g, i)
+        a += degree(g, u)
+    end
+    return a
+end
 
-            println("singlethreaded")
-            b2 = @benchmarkable mapvertices_single($f, $g)
-            println(b2)
-            return println("done")
+function twohop(g::Graph, i::Int)
+    a = 0
+    for u in neighbors(g, i)
+        for v in neighbors(g, u)
+            a += degree(g, v)
+        end
+    end
+    return a
+end
 
-        nv_ = 10000
-        g = SimpleGraph(nv_, 64 * nv_)
-        f = vertex_function
-        println(g)
+function mapvertices(f, g::Graph)
+    n = nv(g)
+    a = zeros(Int, n)
+    Threads.@threads for i in 1:n
+        a[i] = f(g, i)
+    end
+    return a
+end
 
-        comparison(vertex_function, g)
-        comparison(twohop, g)
+function mapvertices_single(f, g)
+    n = nv(g)
+    a = zeros(Int, n)
+    for i in 1:n
+        a[i] = f(g, i)
+    end
+    return a
+end
 
+let
+    nv_ = 10000
+    g = SimpleGraph(nv_, 64 * nv_)
+
+    SUITE["parallel"]["egonet"]["vertexfunction"] = @benchmarkable mapvertices(
+        $vertex_function, $g
+    )
+    SUITE["parallel"]["egonet"]["twohop"] = @benchmarkable mapvertices($twohop, $g)
+
+    SUITE["serial"]["egonet"]["vertexfunction"] = @benchmarkable mapvertices_single(
+        $vertex_function, $g
+    )
+    SUITE["serial"]["egonet"]["twohop"] = @benchmarkable mapvertices_single($twohop, $g)
+end
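The rewritten `egonets.jl` registers the same two kernels under both `SUITE["parallel"]["egonet"]` and `SUITE["serial"]["egonet"]`, so the threaded and single-threaded versions can be compared directly once the suite is loaded. A sketch (not part of the commit), assuming Julia was started with multiple threads (as CI arranges via `JULIA_NUM_THREADS: auto`):

```julia
# Sketch only: compare the threaded and single-threaded egonet benchmarks
# after SUITE has been defined, e.g. via include("benchmark/benchmarks.jl").
using BenchmarkTools

par = run(SUITE["parallel"]["egonet"]; verbose=true)
ser = run(SUITE["serial"]["egonet"]; verbose=true)

# judge() flags, per key ("vertexfunction", "twohop"), whether the threaded
# mapvertices is an improvement or a regression relative to the serial run.
display(judge(median(par), median(ser)))
```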