diff --git a/src/ReTestItems.jl b/src/ReTestItems.jl index a6ff0beb..f09ffe24 100644 --- a/src/ReTestItems.jl +++ b/src/ReTestItems.jl @@ -673,14 +673,14 @@ function include_testfiles!(project_name, projectfile, paths, ti_filter::TestIte # we set it below in tls as __RE_TEST_SETUPS__ for each included file setup_channel = Channel{Pair{Symbol, TestSetup}}(Inf) setup_task = @spawn begin - setups = Dict{Symbol, TestSetup}() - for (name, setup) in setup_channel - if haskey(setups, name) + testsetups = Dict{Symbol, TestSetup}() + for (name, ts) in setup_channel + if haskey(testsetups, name) @warn "Encountered duplicate @testsetup with name: `$name`. Replacing..." end - setups[name] = setup + testsetups[name] = ts end - return setups + return testsetups end hidden_re = r"\.\w" @sync for (root, d, files) in Base.walkdir(project_root) @@ -732,21 +732,21 @@ function include_testfiles!(project_name, projectfile, paths, ti_filter::TestIte # prune empty directories/files close(setup_channel) prune!(root_node) - ti = TestItems(root_node) - flatten_testitems!(ti) - check_ids(ti.testitems) - setups = fetch(setup_task) - for (i, x) in enumerate(ti.testitems) + tis = TestItems(root_node) + flatten_testitems!(tis) + check_ids(tis.testitems) + testsetups = fetch(setup_task) + for (i, ti) in enumerate(tis.testitems) # set a unique number for each testitem - x.number[] = i + ti.number[] = i # populate testsetups for each testitem - for s in x.setups - if haskey(setups, s) - push!(x.testsetups, setups[s]) + for setup_name in ti.setups + if haskey(testsetups, setup_name) + push!(ti.testsetups, setup_name => testsetups[setup_name]) end end end - return ti, setups # only returned for testing + return tis, testsetups # only returned for testing end function check_ids(testitems) @@ -829,28 +829,36 @@ function with_source_path(f, path) end end -function ensure_setup!(ctx::TestContext, setup::Symbol, setups::Vector{TestSetup}, logs::Symbol) +# Call `runtestsetup(ts, ...)` for each 
`ts::TestSetup` required by the given `TestItem` +# Return `Dict` mapping `setup_name::Symbol => module_name::Symbol` +function runtestsetups(ti::TestItem, ctx::TestContext; logs::Symbol) + # Initialise with the list of _requested_ setups, so that if no setup by that name was + # found when including files we return the setup name as the module name. Attempting to + # import that name, like `using $setup`, will then throw an appropriate error. + setup_to_mod = Dict{Symbol,Symbol}(ti.setups .=> ti.setups) + for (ts_name, ts) in ti.testsetups + @debugv 1 "Ensuring setup for test item $(repr(ti.name)) $(ts_name)$(_on_worker())." + setup_to_mod[ts_name] = runtestsetup(ts, ctx; logs) + end + return setup_to_mod +end + +# Run the given `TestSetup`, add the resulting `Module` to the `TestContext`, and return the +# name of the `Module` (i.e. return a `Symbol`). +# If the `TestSetup` has already been evaluated on this process and so is already in the +# `TestContext`, simply return the `Module` name. 
+function runtestsetup(ts::TestSetup, ctx::TestContext; logs::Symbol) mods = ctx.setups_evaled @lock mods.lock begin - mod = get(mods.modules, setup, nothing) + mod = get(mods.modules, ts.name, nothing) if mod !== nothing # we've eval-ed this module before, so just return the module name return nameof(mod) end - # we haven't eval-ed this module before, so we need to eval it - i = findfirst(s -> s.name == setup, setups) - if i === nothing - # if the setup hasn't been eval-ed before and we don't have it - # in our testsetups, then it was never found during including - # in that case, we return the expected test setup module name - # which will turn into a `using $setup` in the test item - # which will throw an appropriate error - return setup - end - ts = setups[i] - # In case the setup fails to eval, we discard its logs -- the setup will be - # attempted to eval for each of the dependent test items and we'd for each - # failed test item, we'd print the cumulative logs from all the previous attempts. + # We haven't eval-ed this module before, so we need to eval it. + # In case the setup fails to eval, we discard its logs -- we will attempt to eval + # this testsetup for each of the dependent test items, and then for each failed + # test item, we'd print the cumulative logs from all the previous attempts. isassigned(ts.logstore) && close(ts.logstore[]) ts.logstore[] = open(logpath(ts), "w") mod_expr = :(module $(gensym(ts.name)) end) @@ -860,7 +868,7 @@ function ensure_setup!(ctx::TestContext, setup::Symbol, setups::Vector{TestSetup with_source_path(() -> Core.eval(Main, mod_expr), ts.file) end # add the new module to our TestSetupModules - mods.modules[setup] = newmod + mods.modules[ts.name] = newmod return nameof(newmod) end end @@ -908,9 +916,9 @@ function runtestitem(ti::TestItem; kw...) 
# make a fresh TestSetupModules for each testitem run GLOBAL_TEST_CONTEXT_FOR_TESTING.setups_evaled = TestSetupModules() empty!(ti.testsetups) - for setup in ti.setups - ts = get(GLOBAL_TEST_SETUPS_FOR_TESTING, setup, nothing) - ts !== nothing && push!(ti.testsetups, ts) + for setup_name in ti.setups + ts = get(GLOBAL_TEST_SETUPS_FOR_TESTING, setup_name, nothing) + ts !== nothing && push!(ti.testsetups, setup_name => ts) end runtestitem(ti, GLOBAL_TEST_CONTEXT_FOR_TESTING; kw...) end @@ -954,23 +962,16 @@ function runtestitem( prev = get(task_local_storage(), :__TESTITEM_ACTIVE__, false) task_local_storage()[:__TESTITEM_ACTIVE__] = true try - for setup in ti.setups - # TODO(nhd): Consider implementing some affinity to setups, so that we can - # prefer to send testitems to the workers that have already eval'd setups. - # Or maybe allow user-configurable grouping of test items by worker? - # Or group them by file by default? - - # ensure setup has been evaled before - @debugv 1 "Ensuring setup for test item $(repr(name)) $(setup)$(_on_worker())." - ts_mod = ensure_setup!(ctx, setup, ti.testsetups, logs) - # eval using in our @testitem module - @debugv 1 "Importing setup for test item $(repr(name)) $(setup)$(_on_worker())." + # eval `using $TestSetup` in our @testitem module. + testsetups = runtestsetups(ti, ctx; logs) + for (ts_name, ts_module_name) in testsetups + @debugv 1 "Add setup imports for test item $(repr(name)) $(ts_name)$(_on_worker())." # We look up the testsetups from Main (since tests are eval'd in their own # temporary anonymous module environment.) 
- push!(body.args, Expr(:using, Expr(:., :Main, ts_mod))) - # ts_mod is a gensym'd name so that setup modules don't clash - # so we set a const alias inside our @testitem module to make things work - push!(body.args, :(const $setup = $ts_mod)) + push!(body.args, Expr(:using, Expr(:., :Main, ts_module_name))) + # ts_module_name is a gensym'd name so that setup modules don't clash, + # so set a const alias inside our @testitem module to make things work. + push!(body.args, :(const $ts_name = $ts_module_name)) end @debugv 1 "Setup for test item $(repr(name)) done$(_on_worker())." @@ -1013,7 +1014,7 @@ function runtestitem( LineNumberNode(ti.line, ti.file))) finally # Make sure all test setup logs are commited to file - foreach(ts->isassigned(ts.logstore) && flush(ts.logstore[]), ti.testsetups) + foreach(ts->isassigned(ts.logstore) && flush(ts.logstore[]), values(ti.testsetups)) ts1 = Test.pop_testset() task_local_storage()[:__TESTITEM_ACTIVE__] = prev @assert ts1 === ts diff --git a/src/log_capture.jl b/src/log_capture.jl index 6d36ac9d..f48bb27f 100644 --- a/src/log_capture.jl +++ b/src/log_capture.jl @@ -172,7 +172,7 @@ function print_errors_and_captured_logs( ) ts = ti.testsets[run_number] has_errors = any_non_pass(ts) - has_logs = _has_logs(ti, run_number) || any(_has_logs, ti.testsetups) + has_logs = _has_logs(ti, run_number) || any(_has_logs, values(ti.testsetups)) if has_errors || logs == :batched report_iob = IOContext(IOBuffer(), :color=>Base.get_have_color()) println(report_iob) @@ -214,7 +214,7 @@ end # the captured logs or a messgage that no logs were captured. `print_errors_and_captured_logs` # will call this function only if some logs were collected or when called with `verbose_results`. 
function _print_captured_logs(io, ti::TestItem, run_number::Int) - for setup in ti.testsetups + for (_name, setup) in ti.testsetups _print_captured_logs(io, setup, ti) end has_logs = _has_logs(ti, run_number) diff --git a/src/macros.jl b/src/macros.jl index a231de84..60f585a5 100644 --- a/src/macros.jl +++ b/src/macros.jl @@ -125,7 +125,7 @@ struct TestItem line::Int project_root::String code::Any - testsetups::Vector{TestSetup} # populated by runtests coordinator + testsetups::Dict{Symbol,TestSetup} # populated by runtests coordinator workerid::Base.RefValue{Int} # populated when the test item is scheduled testsets::Vector{DefaultTestSet} # populated when the test item is finished running eval_number::Base.RefValue{Int} # to keep track of how many items have been run so far @@ -136,7 +136,7 @@ function TestItem(number, name, id, tags, default_imports, setups, retries, time _id = @something(id, repr(hash(name, hash(relpath(file, project_root))))) return TestItem( number, name, _id, tags, default_imports, setups, retries, timeout, skip, file, line, project_root, code, - TestSetup[], + Dict{Symbol,TestSetup}(), Ref{Int}(0), DefaultTestSet[], Ref{Int}(0), diff --git a/test/log_capture.jl b/test/log_capture.jl index 78d80fb2..41465c42 100644 --- a/test/log_capture.jl +++ b/test/log_capture.jl @@ -34,8 +34,8 @@ end setup1 = @testsetup module TheTestSetup1 end setup2 = @testsetup module TheTestSetup2 end ti = TestItem(Ref(42), "TheTestItem", "ID007", [], false, [], 0, nothing, false, "source/path", 42, ".", nothing) - push!(ti.testsetups, setup1) - push!(ti.testsetups, setup2) + push!(ti.testsetups, :TheTestSetup1 => setup1) + push!(ti.testsetups, :TheTestSetup2 => setup2) push!(ti.testsets, Test.DefaultTestSet("dummy")) setup1.logstore[] = open(ReTestItems.logpath(setup1), "w") setup2.logstore[] = open(ReTestItems.logpath(setup2), "w")