Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Use arrays instead of tuples #102

Merged
merged 8 commits into from
Nov 4, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 5 additions & 5 deletions docs/src/manual/differentiability.md
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ IncompressibleNavierStokes is
which means that you can back-propagate gradients through the code.
This comes at a cost however, as intermediate velocity fields need to be stored
in memory for use in the backward pass. For this reason, many of the operators
come in two versions: a slow differentiable allocating non-mutating variant (e.g.
come in two versions: a slow differentiable allocating non-mutating variant (e.g.
[`divergence`](@ref)) and fast non-differentiable non-allocating mutating
variant (e.g. [`divergence!`](@ref).)

Expand All @@ -26,7 +26,8 @@ make a time stepping loop composed of differentiable operations:

```julia
import IncompressibleNavierStokes as INS
setup = INS.Setup(; x = (0:0.01:1, 0:0.01:1), Re = 500.0)
ax = range(0, 1, 101)
setup = INS.Setup(; x = (ax, ax), Re = 500.0)
psolver = INS.default_psolver(setup)
method = INS.RKMethods.RK44P2()
Δt = 0.01
Expand All @@ -38,16 +39,15 @@ function final_energy(u)
stepper = INS.timestep(method, stepper, Δt)
end
(; u) = stepper
sum(abs2, u[1][Iu[1]]) / 2 + sum(abs2, u[2][Iu[2]]) / 2
sum(abs2, u[Iu[1], 1]) / 2 + sum(abs2, u[Iu[2], 2]) / 2
end

u = INS.random_field(setup)

using Zygote
g, = Zygote.gradient(final_energy, u)

@show size.(u)
@show size.(g)
@show size(u) size(g)
```

Now `g` is the gradient of `final_energy` with respect to the initial conditions
Expand Down
3 changes: 0 additions & 3 deletions docs/src/manual/time.md
Original file line number Diff line number Diff line change
Expand Up @@ -47,9 +47,6 @@ but they may be integrated in the future.

```@docs
AbstractODEMethod
isexplicit
lambda_conv_max
lambda_diff_max
ode_method_cache
runge_kutta_method
create_stepper
Expand Down
2 changes: 1 addition & 1 deletion examples/Kolmogorov2D.jl
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ ustart = random_field(setup, 0.0; A = 1e-2);
#
# Since the force is steady, it is just stored as a field.

heatmap(setup.bodyforce[1])
heatmap(setup.bodyforce[:, :, 1])

# ## Solve unsteady problem

Expand Down
11 changes: 5 additions & 6 deletions lib/Debug/perf.jl
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@ using Cthulhu
using KernelAbstractions

T = Float32
n = 512
n = 128
ax = range(T(0), T(1), n + 1)
setup = Setup(; x = (ax, ax, ax), Re = T(1e3), backend = CUDABackend())
u = random_field(setup)
Expand Down Expand Up @@ -38,18 +38,17 @@ let
@benchmark IncompressibleNavierStokes.convection_adjoint!($f, $g, $u, $setup)
end

f = vectorfield(setup)
f = vectorfield(setup);
let
fill!.(f, 0)
@benchmark IncompressibleNavierStokes.convectiondiffusion!($f, $u, $setup)
end

uu = stack(u)
uu = permutedims(uu, (4, 1, 2, 3))
ff = copy(uu)
uu = stack(u);
ff = copy(uu);
let
fill!(ff, 0)
@benchmark IncompressibleNavierStokes.cd2!($ff, $uu, $setup)
@benchmark IncompressibleNavierStokes.arrayconvectiondiffusion!($ff, $uu, $setup)
end

f = vectorfield(setup)
Expand Down
2 changes: 2 additions & 0 deletions lib/NeuralClosure/Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ Accessors = "7d9f7c33-5ae7-4f3b-8dc6-eff91059b697"
ComponentArrays = "b0b7db55-cfe3-40fc-9ded-d10e2dbeff66"
DocStringExtensions = "ffbed154-4ef7-542d-bbb7-c09d3a79fcae"
IncompressibleNavierStokes = "5e318141-6589-402b-868d-77d7df8c442e"
JLD2 = "033835bb-8acc-5ee8-8aae-3f567f8a3819"
KernelAbstractions = "63c18a36-062a-441e-b654-da1e3ab1ce7c"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Lux = "b2108857-7c20-44ae-9111-449ecde12c47"
Expand All @@ -28,6 +29,7 @@ Accessors = "0.1"
ComponentArrays = "0.15"
DocStringExtensions = "0.9"
IncompressibleNavierStokes = "2"
JLD2 = "0.5.7"
KernelAbstractions = "0.9"
LinearAlgebra = "1"
Lux = "1"
Expand Down
1 change: 1 addition & 0 deletions lib/NeuralClosure/src/NeuralClosure.jl
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ using ComponentArrays: ComponentArray
using DocStringExtensions
using IncompressibleNavierStokes
using IncompressibleNavierStokes: Dimension, momentum!, apply_bc_u!, project!
using JLD2
using KernelAbstractions
using LinearAlgebra
using Lux
Expand Down
49 changes: 20 additions & 29 deletions lib/NeuralClosure/src/closure.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,35 +2,18 @@
Wrap closure model and parameters so that it can be used in the solver.
"""
function wrappedclosure(m, setup)
(; dimension, Iu) = setup.grid
D = dimension()
# function neuralclosure(u)
# u = stack(ntuple(α -> u[α][Iu[α]], D))
# u = reshape(u, size(u)..., 1) # One sample
# mu = m(u, θ)
# mu = pad_circular(mu, 1)
# sz..., _ = size(mu)
# i = ntuple(Returns(:), D)
# mu = ntuple(α -> mu[i..., α, 1], D)
# end
neuralclosure(u, θ) =
if D == 2
u = cat(u[1][Iu[1]], u[2][Iu[2]]; dims = 3)
u = reshape(u, size(u)..., 1) # One sample
# u = collocate(u)
mu = m(u, θ)
# mu = decollocate(mu)
mu = pad_circular(mu, 1)
mu = (mu[:, :, 1, 1], mu[:, :, 2, 1])
elseif D == 3
u = cat(u[1][Iu[1]], u[2][Iu[2]], u[3][Iu[3]]; dims = 4)
u = reshape(u, size(u)..., 1) # One sample
# u = collocate(u)
mu = m(u, θ)
# mu = decollocate(mu)
mu = pad_circular(mu, 1)
mu = (mu[:, :, :, 1, 1], mu[:, :, :, 2, 1], mu[:, :, :, 3, 1])
end
(; Iu) = setup.grid
inside = Iu[1]
@assert all(==(inside), Iu) "Only periodic grids are supported"
function neuralclosure(u, θ)
s = size(u)
# u = view(u, inside, :)
u = u[inside, :]
u = reshape(u, size(u)..., 1) # Add sample dim
mu = m(u, θ)
mu = pad_circular(mu, 1)
mu = reshape(mu, s) # Remove sample dim
end
end

"""
Expand Down Expand Up @@ -73,6 +56,14 @@ function collocate(u)
# v = (v + circshift(v, ntuple(β -> α == β ? -1 : 0, D + 1))) / 2
# end
if D == 2
# TODO: Check if this is more efficient as
# a convolution with the two channel kernels
# [1 1; 0 0] / 2
# and
# [0 1; 0 1] / 2
# TODO: Maybe skip this step entirely and learn the
# collocation function subject to the skewness
# constraint (left-skewed kernel from staggered right face to center)
a = selectdim(u, 3, 1)
b = selectdim(u, 3, 2)
a = (a .+ circshift(a, (1, 0, 0))) ./ 2
Expand Down
2 changes: 1 addition & 1 deletion lib/NeuralClosure/src/cnn.jl
Original file line number Diff line number Diff line change
Expand Up @@ -12,7 +12,7 @@ function cnn(;
rng = Random.default_rng(),
)
r, c, σ, b = radii, channels, activations, use_bias
(; grid, boundary_conditions) = setup
(; grid) = setup
(; dimension, x) = grid
D = dimension()
T = eltype(x[1])
Expand Down
40 changes: 27 additions & 13 deletions lib/NeuralClosure/src/data_generation.jl
Original file line number Diff line number Diff line change
Expand Up @@ -38,7 +38,7 @@ function lesdatagen(dnsobs, Φ, les, compression, psolver)
FΦ = vectorfield(les)
ΦF = vectorfield(les)
c = vectorfield(les)
results = (; u = fill(Array.(Φu), 0), c = fill(Array.(c), 0))
results = (; u = fill(Array(Φu), 0), c = fill(Array(c), 0))
temp = nothing
on(dnsobs) do (; u, F, t)
Φ(Φu, u, les, compression)
Expand All @@ -47,32 +47,30 @@ function lesdatagen(dnsobs, Φ, les, compression, psolver)
momentum!(FΦ, Φu, temp, t, les)
apply_bc_u!(FΦ, t, les; dudt = true)
project!(FΦ, les; psolver, p)
for α = 1:length(u)
c[α] .= ΦF[α] .- FΦ[α]
end
push!(results.u, Array.(Φu))
push!(results.c, Array.(c))
@. c = ΦF - FΦ
push!(results.u, Array(Φu))
push!(results.c, Array(c))
end
results
end

"""
Save filtered DNS data.
"""
filtersaver(
function filtersaver(
dns,
les,
filters,
compression,
psolver_dns,
psolver_les;
nupdate = 1,
filenames,
F = vectorfield(dns),
p = scalarfield(dns),
) =
processor(
(results, state) -> (; results..., comptime = time() - results.comptime),
) do state
)
@assert isnothing(filenames) || length(filenames) == length(les) * length(filters)
function initialize(state)
comptime = time()
t = fill(state[].t, 0)
dnsobs = Observable((; state[].u, F, state[].t))
Expand Down Expand Up @@ -107,6 +105,20 @@ filtersaver(
state[] = state[] # Save initial conditions
results
end
function finalize(results, state)
comptime = time() - results.comptime
(; data, t) = results
map(enumerate(data)) do (i, data)
(; u, c) = data
u = stack(u)
c = stack(c)
results = (; u, c, t, comptime)
isnothing(filenames) || jldsave(filenames[i]; results...)
results
end
end
processor(initialize, finalize)
end

"""
Create filtered DNS data.
Expand All @@ -128,6 +140,7 @@ function create_les_data(;
icfunc = (setup, psolver, rng) -> random_field(setup, typeof(Re)(0); psolver, rng),
processors = (; log = timelogger(; nupdate = 10)),
rng,
filenames = nothing,
kwargs...,
)
T = typeof(Re)
Expand Down Expand Up @@ -196,12 +209,13 @@ function create_les_data(;
psolver,
psolver_les;
nupdate = savefreq,
filenames,

# Reuse arrays from cache to save memory in 3D DNS.
# Since processors are called outside
# Runge-Kutta steps, there is no danger
# in overwriting the arrays.
F = cache.u₀,
F = cache.ustart,
p = cache.p,
),
),
Expand All @@ -226,7 +240,7 @@ function create_io_arrays(data, setup)
for it = 1:nt, α = 1:D
copyto!(
view(u, colons..., α, it),
view(getfield(trajectory, usym)[it][α], Iu[α]),
view(getfield(trajectory, usym), Iu[α], α, it),
)
end
u
Expand Down
Loading
Loading