fix: update for array

agdestein committed Nov 4, 2024
1 parent 3b3c7fe commit 003f739
Showing 16 changed files with 214 additions and 153 deletions.
2 changes: 2 additions & 0 deletions lib/NeuralClosure/Project.toml

@@ -8,6 +8,7 @@ Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
 ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
 DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
 IncompressibleNavierStokes = "5e318141-6589-402b-868d-77d7df8c442e"
+JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
 KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
 LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
 Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
@@ -28,6 +29,7 @@ Accessors = "0.1"
 ComponentArrays = "0.15"
 DocStringExtensions = "0.9"
 IncompressibleNavierStokes = "2"
+JLD2 = "0.5.7"
 KernelAbstractions = "0.9"
 LinearAlgebra = "1"
 Lux = "1"
1 change: 1 addition & 0 deletions lib/NeuralClosure/src/NeuralClosure.jl

@@ -8,6 +8,7 @@ using ComponentArrays: ComponentArray
 using DocStringExtensions
 using IncompressibleNavierStokes
 using IncompressibleNavierStokes: Dimension, momentum!, apply_bc_u!, project!
+using JLD2
 using KernelAbstractions
 using LinearAlgebra
 using Lux
10 changes: 8 additions & 2 deletions lib/NeuralClosure/src/closure.jl

@@ -1,14 +1,20 @@
 """
 Wrap closure model and parameters so that it can be used in the solver.
 """
-wrappedclosure(m) =
+function wrappedclosure(m, setup)
+    (; Iu) = setup.grid
+    inside = Iu[1]
+    @assert all(==(inside), Iu) "Only periodic grids are supported"
     function neuralclosure(u, θ)
         s = size(u)
-        u = reshape(u, s..., 1) # Add sample dim
+        # u = view(u, inside, :)
+        u = u[inside, :]
+        u = reshape(u, size(u)..., 1) # Add sample dim
         mu = m(u, θ)
         mu = pad_circular(mu, 1)
         mu = reshape(mu, s) # Remove sample dim
     end
+end
 
 """
 Create neural closure model from layers.
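The rewritten wrappedclosure replaces the tuple-of-components interface with a single array of size (N..., D). A minimal shape-bookkeeping sketch of the new path (the 66×66 grid, the two components, and the one-cell ghost layer are illustrative assumptions, not values from the repository):

    # Assumed layout: one ghost cell per side, D = 2 components stacked on the last dim.
    u = rand(Float32, 66, 66, 2)             # full staggered velocity field
    inside = CartesianIndices((2:65, 2:65))  # interior block, playing the role of Iu[1]
    s = size(u)                              # remembered before cropping, as in the diff
    v = u[inside, :]                         # drop the boundary: (64, 64, 2)
    v = reshape(v, size(v)..., 1)            # add sample dim: (64, 64, 2, 1)
    # mu = m(v, θ)                           # closure evaluation (model omitted here)
    # mu = pad_circular(mu, 1)               # periodic padding restores (66, 66, 2, 1)
    # mu = reshape(mu, s)                    # remove sample dim -> size s again

The @assert all(==(inside), Iu) guard is what makes the single-index-set crop valid: on a fully periodic grid every velocity component has the same interior index block.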
28 changes: 22 additions & 6 deletions lib/NeuralClosure/src/data_generation.jl

@@ -57,20 +57,20 @@ end
 """
 Save filtered DNS data.
 """
-filtersaver(
+function filtersaver(
     dns,
     les,
     filters,
     compression,
     psolver_dns,
     psolver_les;
     nupdate = 1,
+    filenames,
     F = vectorfield(dns),
     p = scalarfield(dns),
-) =
-    processor(
-        (results, state) -> (; results..., comptime = time() - results.comptime),
-    ) do state
+)
+    @assert isnothing(filenames) || length(filenames) == length(les) * length(filters)
+    function initialize(state)
         comptime = time()
         t = fill(state[].t, 0)
         dnsobs = Observable((; state[].u, F, state[].t))
@@ -105,6 +105,20 @@ filtersaver(
         state[] = state[] # Save initial conditions
         results
     end
+    function finalize(results, state)
+        comptime = time() - results.comptime
+        (; data, t) = results
+        map(enumerate(data)) do (i, data)
+            (; u, c) = data
+            u = stack(u)
+            c = stack(c)
+            results = (; u, c, t, comptime)
+            isnothing(filenames) || jldsave(filenames[i]; results...)
+            results
+        end
+    end
+    processor(initialize, finalize)
+end
@@ -126,6 +140,7 @@ function create_les_data(;
     icfunc = (setup, psolver, rng) -> random_field(setup, typeof(Re)(0); psolver, rng),
     processors = (; log = timelogger(; nupdate = 10)),
     rng,
+    filenames = nothing,
     kwargs...,
 )
     T = typeof(Re)
@@ -194,6 +209,7 @@ function create_les_data(;
         psolver,
         psolver_les;
         nupdate = savefreq,
+        filenames,
 
         # Reuse arrays from cache to save memory in 3D DNS.
        # Since processors are called outside
@@ -224,7 +240,7 @@ function create_io_arrays(data, setup)
     for it = 1:nt, α = 1:D
         copyto!(
             view(u, colons..., α, it),
-            view(getfield(trajectory, usym)[it], Iu[α], α),
+            view(getfield(trajectory, usym), Iu[α], α, it),
         )
     end
     u
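With the new filenames keyword (default nothing, so nothing is written unless requested), create_les_data forwards one file name per LES-grid/filter pair to filtersaver, whose finalize step stacks the snapshot lists into plain arrays and writes them with JLD2. A hedged sketch of the resulting on-disk round trip (shapes and the file name are made up; the keys mirror the results named tuple above):

    using JLD2

    u = rand(Float32, 64, 64, 2, 11)        # filtered velocity, 11 snapshots (assumed)
    c = rand(Float32, 64, 64, 2, 11)        # commutator error, same shape (assumed)
    t = collect(range(0.0f0, 1.0f0, 11))    # snapshot times
    comptime = 12.3                         # seconds, illustrative
    jldsave("filtered_example.jld2"; u, c, t, comptime)  # as in finalize

    data = load("filtered_example.jld2")    # Dict keyed by field name
    @assert size(data["u"], 4) == length(data["t"])

Saving in finalize rather than inside the time loop keeps file I/O out of the hot path; the trade-off is that an interrupted run writes no files.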
14 changes: 7 additions & 7 deletions lib/NeuralClosure/src/filter.jl

@@ -25,8 +25,8 @@ struct VolumeAverage <: AbstractFilter end
 
 function (::FaceAverage)(v, u, setup_les, comp)
     (; grid, backend, workgroupsize) = setup_les
-    (; Nu, Iu) = grid
-    D = length(u)
+    (; dimension, Nu, Iu) = grid
+    D = dimension()
     @kernel function Φ!(v, u, ::Val{α}, face, I0) where {α}
         I = @index(Global, Cartesian)
         J = I0 + comp * (I - oneunit(I))
@@ -48,8 +48,8 @@ end
 "Reconstruct DNS velocity `u` from LES velocity `v`."
 function reconstruct!(u, v, setup_dns, setup_les, comp)
     (; grid, boundary_conditions, backend, workgroupsize) = setup_les
-    (; N) = grid
-    D = length(u)
+    (; dimension, N) = grid
+    D = dimension()
     e = Offset(D)
 
     @assert all(bc -> bc[1] isa PeriodicBC && bc[2] isa PeriodicBC, boundary_conditions)
     @kernel function R!(u, v, ::Val{α}, volume) where {α}
@@ -79,13 +79,13 @@ reconstruct(v, setup_dns, setup_les, comp) =
 
 function (::VolumeAverage)(v, u, setup_les, comp)
     (; grid, boundary_conditions, backend, workgroupsize) = setup_les
-    (; N, Nu, Iu) = grid
-    D = length(u)
+    (; dimension, N, Nu, Iu) = grid
+    D = dimension()
     @assert all(bc -> bc[1] isa PeriodicBC && bc[2] isa PeriodicBC, boundary_conditions)
     @kernel function Φ!(v, u, ::Val{α}, volume, I0) where {α}
         I = @index(Global, Cartesian)
         J = I0 + comp * (I - oneunit(I))
-        s = zero(eltype(v[α]))
+        s = zero(eltype(v))
         # n = 0
         for i in volume
             # Periodic extension
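The recurring D = length(u) → D = dimension() swap is forced by the new storage: with all velocity components stacked in one array, length counts scalar entries instead of components, so the kernels now ask the grid for the dimension (the destructured dimension object is callable and returns D, as the diff's D = dimension() shows). A minimal sketch of the distinction, with assumed sizes:

    u_tuple = (rand(Float32, 66, 66), rand(Float32, 66, 66))
    length(u_tuple)                # 2: the old idiom really returned D
    u_array = rand(Float32, 66, 66, 2)
    length(u_array)                # 8712: total element count, no longer D
    size(u_array, ndims(u_array))  # 2: D can also be read off the last axis

Likewise s = zero(eltype(v[α])) becomes zero(eltype(v)): there is no per-component array left to index.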
45 changes: 34 additions & 11 deletions lib/NeuralClosure/src/groupconv.jl

@@ -41,20 +41,42 @@ function rot2(u, r)
     u[I, chans...]
 end
 
-# For vector fields (u, v)
+"Rotate vector fields `[ux;;; uy]`"
 function rot2(u::Tuple{T,T}, r) where {T}
+    # ux, uy = eachslice(u; dims = ndims(u))
+    ux, uy = u
     r = mod(r, 4)
-    ru = rot2(u[1], r)
-    rv = rot2(u[2], r)
-    if r == 0
-        (ru, rv)
+    rx = rot2(ux, r)
+    ry = rot2(uy, r)
+    ru = if r == 0
+        (rx, ry)
     elseif r == 1
-        (-rv, ru)
+        (-ry, rx)
     elseif r == 2
-        (-ru, -rv)
+        (-rx, -ry)
     elseif r == 3
-        (rv, -ru)
+        (ry, -rx)
     end
+    ru
 end
+
+"Rotate vector fields `[ux;;; uy]`"
+function vecrot2(u, r)
+    # ux, uy = eachslice(u; dims = ndims(u))
+    ux, uy = u[:, :, 1], u[:, :, 2]
+    r = mod(r, 4)
+    rx = rot2(ux, r)
+    ry = rot2(uy, r)
+    ru = if r == 0
+        (rx, ry)
+    elseif r == 1
+        (-ry, rx)
+    elseif r == 2
+        (-rx, -ry)
+    elseif r == 3
+        (ry, -rx)
+    end
+    stack(ru)
+end
 
 # # For augmented vector fields (u, v, -u, -v)
@@ -77,8 +99,9 @@ end
 "Rotate staggered grid velocity field. See also [`rot2`](@ref)."
 function rot2stag(u, g)
     g = mod(g, 4)
-    u = rot2(u, g)
-    ux, uy = u
+    u = vecrot2(u, g)
+    # ux, uy = eachslice(u; dims = ndims(u))
+    ux, uy = u[:, :, 1], u[:, :, 2]
     if g in (1, 2)
         ux = circshift(ux, -1)
         ux[end, :] .= ux[2, :]
@@ -87,7 +110,7 @@ function rot2stag(u, g)
         uy = circshift(uy, (0, -1))
         uy[:, end] .= uy[:, 2]
     end
-    (ux, uy)
+    cat(ux, uy; dims = 3)
 end
 
 """
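rot2stag now consumes and returns one stacked array (components on dimension 3) instead of a tuple, delegating the component mixing to the new vecrot2. A hypothetical usage sketch under that layout (random data; the function is assumed to be in scope from NeuralClosure):

    u = rand(Float32, 64, 64, 2)   # staggered 2D velocity, stacked components (assumed)
    u1 = rot2stag(u, 1)            # quarter-turn rotation plus staggering correction
    @assert size(u1) == size(u)    # same array layout in and out

Keeping rot2stag array-in/array-out matters for the equivariance checks: create_relerr_symmetry_prior below compares m(rot2stag(u, g), θ) against rot2stag(m(u, θ), g) without converting between tuples and arrays.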
73 changes: 36 additions & 37 deletions lib/NeuralClosure/src/training.jl

@@ -32,7 +32,8 @@ create_dataloader_post(trajectories; ntrajectory, nunroll, device = identity) =
         @assert nt ≥ nunroll "Trajectory too short for nunroll = $nunroll"
         istart = rand(rng, 1:nt-nunroll)
         it = istart:istart+nunroll
-        (; u = device.(u[it]), t = t[it])
+        u = selectdim(u, ndims(u), it) |> Array |> device # convert view to array first
+        (; u, t = t[it])
     end
     data, rng
 end
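selectdim returns a lazy SubArray; materializing it with Array before the device transfer (as the new comment says) avoids handing a strided view to the GPU copy. A small illustration with assumed trajectory shapes:

    u = rand(Float32, 64, 64, 2, 100)   # 100 stored snapshots (illustrative)
    it = 17:27                          # one unroll window, nunroll = 10
    w = selectdim(u, ndims(u), it)      # no-copy view into the trajectory
    w isa SubArray                      # true
    batch = Array(w)                    # contiguous copy, safe to send to the device
    size(batch)                         # (64, 64, 2, 11)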
@@ -115,25 +116,25 @@ Create a-posteriori loss function.
 function create_loss_post(; setup, method, psolver, closure, nsubstep = 1)
     closure_model = wrappedclosure(closure, setup)
     setup = (; setup..., closure_model)
-    (; dimension, Iu, x) = setup.grid
-    D = dimension()
+    (; Iu) = setup.grid
+    inside = Iu[1]
+    @assert all(==(inside), Iu)
     loss_post(data, θ) =
         sum(data) do (; u, t)
             T = eltype(θ)
-            v = u[1]
+            ules = selectdim(u, ndims(u), 1) |> collect
             stepper =
-                create_stepper(method; setup, psolver, u = v, temp = nothing, t = t[1])
+                create_stepper(method; setup, psolver, u = ules, temp = nothing, t = t[1])
             loss = zero(T)
             for it = 2:length(t)
                 Δt = (t[it] - t[it-1]) / nsubstep
                 for isub = 1:nsubstep
                     stepper = timestep(method, stepper, Δt; θ)
                 end
-                a, b = T(0), T(0)
-                for α = 1:length(u[1])
-                    a += sum(abs2, (stepper.u[α]-u[it][α])[Iu[α]])
-                    b += sum(abs2, u[it][α][Iu[α]])
-                end
+                uref = view(u, inside, :, it)
+                ules = view(stepper.u, inside, :)
+                a = sum(abs2, ules - uref)
+                b = sum(abs2, uref)
                 loss += a / b
             end
             loss / (length(t) - 1)
@@ -145,14 +146,15 @@
 Create a-posteriori relative error.
 """
 function create_relerr_post(; data, setup, method, psolver, closure_model, nsubstep = 1)
     setup = (; setup..., closure_model)
-    (; dimension, Iu) = setup.grid
-    D = dimension()
+    (; Iu) = setup.grid
+    inside = Iu[1]
+    @assert all(==(inside), Iu)
     (; u, t) = data
-    v = copy.(u[1])
+    v = selectdim(u, ndims(u), 1) |> collect
     cache = IncompressibleNavierStokes.ode_method_cache(method, setup)
     function relerr_post(θ)
-        T = eltype(u[1][1])
-        copyto!.(v, u[1])
+        T = eltype(u)
+        copyto!(v, selectdim(u, ndims(u), 1))
         stepper = create_stepper(method; setup, psolver, u = v, temp = nothing, t = t[1])
         e = zero(T)
         for it = 2:length(t)
@@ -161,13 +163,10 @@ function create_relerr_post(; data, setup, method, psolver, closure_model, nsubstep = 1)
             stepper =
                 IncompressibleNavierStokes.timestep!(method, stepper, Δt; θ, cache)
         end
-        a, b = T(0), T(0)
-        for α = 1:D
-            # a += sum(abs2, (stepper.u[α]-u[it][α])[Iu[α]])
-            # b += sum(abs2, u[it][α][Iu[α]])
-            a += sum(abs2, view(stepper.u[α] - u[it][α], Iu[α]))
-            b += sum(abs2, view(u[it][α], Iu[α]))
-        end
+        uref = view(u, inside, :, it)
+        ules = view(stepper.u, inside, :)
+        a = sum(abs2, ules - uref)
+        b = sum(abs2, uref)
         e += sqrt(a) / sqrt(b)
     end
     e / (length(t) - 1)
@@ -189,15 +188,17 @@ function create_relerr_symmetry_post(;
     (; dimension, Iu) = setup.grid
     D = dimension()
     T = eltype(u[1])
+    inside = Iu[1]
+    @assert all(==(inside), Iu)
     cache = IncompressibleNavierStokes.ode_method_cache(method, setup)
     function err(θ)
         stepper =
-            create_stepper(method; setup, psolver, u = copy.(u), temp = nothing, t = T(0))
+            create_stepper(method; setup, psolver, u = copy(u), temp = nothing, t = T(0))
         stepper_rot = create_stepper(
             method;
             setup,
             psolver,
-            u = rot2stag(copy.(u), g),
+            u = rot2stag(copy(u), g),
             temp = nothing,
             t = T(0),
         )
@@ -207,11 +208,8 @@ function create_relerr_symmetry_post(;
             stepper_rot =
                 IncompressibleNavierStokes.timestep!(method, stepper_rot, Δt; θ, cache)
             u_rot = rot2stag(stepper.u, g)
-            a, b = T(0), T(0)
-            for α = 1:D
-                a += sum(abs2, view(stepper_rot.u[α] - u_rot[α], Iu[α]))
-                b += sum(abs2, view(u_rot[α], Iu[α]))
-            end
+            a = sum(abs2, view(stepper_rot.u - u_rot, inside, :))
+            b = sum(abs2, view(u_rot, inside, :))
             e += sqrt(a) / sqrt(b)
         end
         e / nstep
@@ -225,16 +223,17 @@ function create_relerr_symmetry_prior(; u, setup, g = 1)
     (; grid, closure_model) = setup
     (; dimension, Iu) = grid
     D = dimension()
-    T = eltype(u[1][1])
+    T = eltype(u[1])
+    inside = Iu[1]
+    @assert all(==(inside), Iu)
     function err(θ)
-        e = sum(u) do u
+        e = sum(eachslice(u; dims = ndims(u))) do u
             cr = closure_model(rot2stag(u, g), θ)
             rc = rot2stag(closure_model(u, θ), g)
-            a, b = T(0), T(0)
-            for α = 1:D
-                a += sum(abs2, view(rc[α] - cr[α], Iu[α]))
-                b += sum(abs2, view(cr[α], Iu[α]))
-            end
+            cr = view(cr, inside, :)
+            rc = view(rc, inside, :)
+            a = sum(abs2, rc - cr)
+            b = sum(abs2, cr)
             sqrt(a) / sqrt(b)
         end
         e / length(u)
@@ -306,6 +305,6 @@ function create_callback(
     (; callbackstate, callback)
 end
 
+getlearningrate(r) = -1 # Fallback
 getlearningrate(r::Adam) = r.eta
 getlearningrate(r::OptimiserChain{Tuple{Adam,WeightDecay}}) = r.opts[1].eta
-getlearningrate(r) = -1
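The getlearningrate methods dispatch on optimizer rule types, with the fallback now listed first and commented; it returns -1 for rules it does not recognize. A hedged sketch of the behavior, assuming the types come from Optimisers.jl as the dispatch signatures suggest:

    using Optimisers: Adam, OptimiserChain, WeightDecay

    getlearningrate(r) = -1 # Fallback
    getlearningrate(r::Adam) = r.eta
    getlearningrate(r::OptimiserChain{Tuple{Adam,WeightDecay}}) = r.opts[1].eta

    getlearningrate(Adam(1.0e-3))                                       # 0.001
    getlearningrate(OptimiserChain(Adam(1.0e-3), WeightDecay(1.0e-4)))  # 0.001
    getlearningrate(WeightDecay(1.0e-4))                                # -1 (unknown rule)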