
Commit c09513c

Remove old example for now
1 parent 08b555a commit c09513c

File tree

11 files changed (+25, -206 lines)


.buildkite/pipeline.yml (-6)

@@ -7,9 +7,6 @@ steps:
           test_args: "--quickfail"
       - JuliaCI/julia-coverage#v1:
           codecov: true
-          dirs:
-            - src
-            - ext
     agents:
       queue: "juliagpu"
       cuda: "*"
@@ -30,9 +27,6 @@ steps:
           test_args: "--quickfail"
       - JuliaCI/julia-coverage#v1:
           codecov: true
-          dirs:
-            - src
-            - ext
     env:
       JULIA_AMDGPU_CORE_MUST_LOAD: "1"
       JULIA_AMDGPU_HIP_MUST_LOAD: "1"

.github/workflows/CI.yml (-2)

@@ -41,8 +41,6 @@ jobs:
           RETESTITEMS_NWORKERS: 4
           RETESTITEMS_NWORKER_THREADS: 2
       - uses: julia-actions/julia-processcoverage@v1
-        with:
-          directories: src,ext
       - uses: codecov/codecov-action@v4
         with:
           files: lcov.info

LocalPreferences.toml (+2)

@@ -0,0 +1,2 @@
+[LuxTestUtils]
+target_modules = ["LuxNeuralOperators", "Lux", "LuxLib"]
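
For context: LuxTestUtils reads this preference to scope its JET-based checks. As I understand the API, target_modules limits which modules @jet reports type-inference failures from, so instabilities inside upstream dependencies do not fail this package's tests. A minimal sketch of the intended usage (illustrative, not from this commit):

using LuxTestUtils

square_sum(x) = sum(abs2, x)

# @jet wraps JET's test macros; with the preference above, only reports
# originating in LuxNeuralOperators, Lux, or LuxLib would fail the test.
@jet square_sum(rand(Float32, 4))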

codecov.yml (+3)

@@ -0,0 +1,3 @@
+codecov:
+  notify:
+    wait_for_ci: false

examples/Burgers/main.jl (-51)

This file was deleted.

examples/Project.toml (-14)

This file was deleted.

examples/common.jl (-117)

This file was deleted.

src/transform.jl (+2, -2)

@@ -28,7 +28,7 @@ end

 @inline truncate_modes(ft::FourierTransform, x_fft::AbstractArray) = low_pass(ft, x_fft)

-function inverse(ft::FourierTransform, x_fft::AbstractArray{T, N},
-        M::NTuple{N, Int64}) where {T, N}
+function inverse(
+        ft::FourierTransform, x_fft::AbstractArray{T, N}, M::NTuple{N, Int64}) where {T, N}
     return real(irfft(x_fft, first(M), 1:ndims(ft)))
 end
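
A note on why inverse threads first(M) through to irfft: rfft along the first dimension stores only the d ÷ 2 + 1 non-redundant complex coefficients, so the original length d cannot be recovered from the truncated array and must be supplied. A round-trip sketch in plain FFTW (illustrative only):

using FFTW

x = rand(Float32, 16, 3)     # original length d = 16 along dim 1
x_fft = rfft(x, 1)           # size (9, 3): rfft keeps 16 ÷ 2 + 1 = 9 coefficients
x_rec = irfft(x_fft, 16, 1)  # the original length must be passed explicitly
@assert x_rec ≈ x            # round trip recovers x up to floating-point error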

test/fno_tests.jl (+11, -8)

@@ -1,20 +1,23 @@
 @testitem "Fourier Neural Operator" setup=[SharedTestSetup] begin
     @testset "BACKEND: $(mode)" for (mode, aType, dev, ongpu) in MODES
-        rng = get_default_rng(mode)
+        rng = get_stable_rng()

         setups = [
-            (modes=(16,), chs=(2, 64, 64, 64, 64, 64, 128, 1), x_size=(2, 1024, 5),
-                y_size=(1, 1024, 5), permuted=Val(false)),
-            (modes=(16,), chs=(2, 64, 64, 64, 64, 64, 128, 1), x_size=(1024, 2, 5),
-                y_size=(1024, 1, 5), permuted=Val(true))]
+            (modes=(16,), chs=(2, 64, 64, 64, 64, 64, 128, 1),
+                x_size=(2, 1024, 5), y_size=(1, 1024, 5), permuted=Val(false)),
+            (modes=(16,), chs=(2, 64, 64, 64, 64, 64, 128, 1),
+                x_size=(1024, 2, 5), y_size=(1024, 1, 5), permuted=Val(true))]

         @testset "$(length(setup.modes))D: permuted = $(setup.permuted)" for setup in setups
             fno = FourierNeuralOperator(; setup.chs, setup.modes, setup.permuted)

-            x = rand(rng, Float32, setup.x_size...)
-            y = rand(rng, Float32, setup.y_size...)
+            x = rand(rng, Float32, setup.x_size...) |> aType
+            y = rand(rng, Float32, setup.y_size...) |> aType

-            ps, st = Lux.setup(rng, fno)
+            ps, st = Lux.setup(rng, fno) |> dev
+
+            @inferred fno(x, ps, st)
+            @jet fno(x, ps, st)

             @test size(first(fno(x, ps, st))) == setup.y_size
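
The |> aType and |> dev additions here (and in test/layers_tests.jl below) are what make the shared MODES loop backend-generic: raw arrays are converted with the backend's array type, while parameters and states are moved with its device. A hypothetical CPU-only sketch of the pattern, with aType/dev standing in for one MODES entry:

using Lux, Random

aType, dev = Array, cpu_device()       # a CUDA entry would use CuArray / gpu_device()

rng = Random.default_rng()
model = Dense(2 => 4)
ps, st = Lux.setup(rng, model) |> dev  # move parameters and states as one tuple
x = rand(rng, Float32, 2, 5) |> aType  # convert input data to the backend array type
y, _ = model(x, ps, st)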

test/layers_tests.jl (+6, -5)

@@ -1,6 +1,6 @@
 @testitem "SpectralConv & SpectralKernel" setup=[SharedTestSetup] begin
     @testset "BACKEND: $(mode)" for (mode, aType, dev, ongpu) in MODES
-        rng = get_default_rng(mode)
+        rng = get_stable_rng()

         opconv = [SpectralConv, SpectralKernel]
         setups = [
@@ -11,7 +11,7 @@
             (; m=(10, 10), permuted=Val(true),
                 x_size=(22, 22, 1, 5), y_size=(22, 22, 64, 5))]

-        @testset "$(op) $(length(setup.modes))D: permuted = $(setup.permuted)" for setup in setups,
+        @testset "$(op) $(length(setup.m))D: permuted = $(setup.permuted)" for setup in setups,
             op in opconv

             p = Lux.__unwrap_val(setup.permuted)
@@ -22,13 +22,14 @@
             l1 = p ? Conv(ntuple(_ -> 1, length(setup.m)), in_chs => first(ch)) :
                 Dense(in_chs => first(ch))
             m = Chain(l1, op(ch, setup.m; setup.permuted))
-            ps, st = Lux.setup(rng, m)
+            ps, st = Lux.setup(rng, m) |> dev

-            x = rand(rng, Float32, setup.x_size...)
+            x = rand(rng, Float32, setup.x_size...) |> aType
             @test size(first(m(x, ps, st))) == setup.y_size
             @inferred m(x, ps, st)
+            @jet m(x, ps, st)

-            data = [(x, rand(rng, Float32, setup.y_size...))]
+            data = [(x, aType(rand(rng, Float32, setup.y_size...)))]
             l2, l1 = train!(m, ps, st, data; epochs=10)
             @test l2 < l1
         end

test/shared_testsetup.jl (+1, -1)

@@ -43,7 +43,7 @@ default_loss_function(model, ps, x, y) = mean(abs2, y .- model(x, ps))
 train!(args...; kwargs...) = train!(default_loss_function, args...; kwargs...)

 function train!(loss, model, ps, st, data; epochs=10)
-    m = StatefulLuxLayer(model, ps, st)
+    m = StatefulLuxLayer{true}(model, ps, st)

     l1 = loss(m, ps, first(data)...)
     st_opt = Optimisers.setup(Adam(0.01f0), ps)
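
On the StatefulLuxLayer{true} change: my reading of Lux's API is that the type parameter marks the state type as fixed across calls, keeping the wrapper concretely typed where the parameter-free constructor could not be. A minimal sketch, assuming the standard Lux constructor:

using Lux, Random

rng = Random.default_rng()
model = Dense(2 => 2)
ps, st = Lux.setup(rng, model)

# {true}: the state's type is assumed not to change between calls.
smodel = StatefulLuxLayer{true}(model, ps, st)
y = smodel(rand(rng, Float32, 2, 3))  # forwards to model(x, ps, st), caching updated state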
