diff --git a/.gitignore b/.gitignore
index 38ed5d8..714e03a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -2,3 +2,4 @@ Manifest.toml
 .vscode
 wip
 examples
+docs/build
\ No newline at end of file
diff --git a/docs/.JuliaFormatter.toml b/docs/.JuliaFormatter.toml
new file mode 100644
index 0000000..6c3434e
--- /dev/null
+++ b/docs/.JuliaFormatter.toml
@@ -0,0 +1,10 @@
+style = "sciml"
+format_markdown = true
+whitespace_in_kwargs = false
+margin = 92
+indent = 4
+format_docstrings = true
+separate_kwargs_with_semicolon = true
+always_for_in = true
+annotate_untyped_fields_with_any = false
+join_lines_based_on_source = false
diff --git a/docs/pages.jl b/docs/pages.jl
index f76f5ed..1e71de9 100644
--- a/docs/pages.jl
+++ b/docs/pages.jl
@@ -1,5 +1,9 @@
 pages = [
-    "Home" => "index.md",
-    # "fno_tut" => "tutorials/fno.md",
-    # "deeponet" => "tutorials/deeponet.md",
-]
\ No newline at end of file
+    "NeuralOperators.jl" => "index.md",
+    "Tutorials" => Any[
+        "FNO" => "tutorials/fno.md",
+        "DeepONet" => "tutorials/deeponet.md",
+        "NOMAD" => "tutorials/nomad.md"
+    ],
+    "Building blocks" => "api.md"
+]
diff --git a/docs/src/api.md b/docs/src/api.md
new file mode 100644
index 0000000..a8bcb4f
--- /dev/null
+++ b/docs/src/api.md
@@ -0,0 +1,10 @@
+# API
+
+## Building blocks
+
+```@docs
+OperatorConv
+SpectralConv
+OperatorKernel
+SpectralKernel
+```
\ No newline at end of file
diff --git a/docs/src/assets/Project.toml b/docs/src/assets/Project.toml
new file mode 100644
index 0000000..de8c908
--- /dev/null
+++ b/docs/src/assets/Project.toml
@@ -0,0 +1,3 @@
+[deps]
+Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4"
+NeuralOperators = "ea5c82af-86e5-48da-8ee1-382d6ad7af4b"
diff --git a/docs/src/index.md b/docs/src/index.md
index b82bba9..113234d 100644
--- a/docs/src/index.md
+++ b/docs/src/index.md
@@ -1,17 +1,24 @@
 # NeuralOperators
 
-Learn mapping between function spaces ...
+`NeuralOperators.jl` is a Julia package that provides architectures for learning mappings between function spaces and grid-invariant solutions of PDEs. The models are `Lux.jl` architectures, which you can train by writing your own training loop or by using `Lux`'s [training API](https://lux.csail.mit.edu/stable/tutorials/beginner/2_PolynomialFitting).
 
-## Fourier Neural Operators
-blah blah...
+## Installation
 
-Tutorial : [fno_tutorial](tutorials/fno.md)
+On Julia 1.10 and later, you can install `NeuralOperators.jl` with
 
-## DeepONets
-blah blah...
+```julia
+import Pkg
+Pkg.add("NeuralOperators")
+```
+
+The currently provided operator architectures are:
 
-Tutorial : [deeponet_tutorial](tutorials/deeponet.md)
+* [Fourier Neural Operators (FNOs)](tutorials/fno.md)
+* [DeepONets](tutorials/deeponet.md)
+* [Nonlinear Manifold Decoders for Operator Learning (NOMADs)](tutorials/nomad.md)
 
+## Benchmark
+TODO: Link to SciMLBenchmarks page
 
 ## Citation
 
diff --git a/docs/src/tutorials/deeponet.md b/docs/src/tutorials/deeponet.md
index 7e35932..4efc311 100644
--- a/docs/src/tutorials/deeponet.md
+++ b/docs/src/tutorials/deeponet.md
@@ -1,8 +1,20 @@
-# DeepONet
+# DeepONets
+DeepONets are another class of networks that learn a mapping between two function spaces by separately encoding the input function (with a branch net) and a location in the output domain (with a trunk net). The latent code of the input function is then projected onto the latent code of the location to give the output value. This allows the network to learn mappings between functions defined on different domains.
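+
+In `NeuralOperators.jl` the whole branch/trunk architecture is exposed as a single `Lux.jl` layer, so it follows the usual `Lux` setup/apply workflow. The snippet below is only an illustrative sketch: the layer sizes are arbitrary, and the keyword constructor and the `(branch_input, trunk_input)` calling convention assumed here should be checked against the `DeepONet` docstring at the bottom of this page.
+
+```julia
+using NeuralOperators, Lux, Random
+
+# Assumed setup: input functions sampled at 64 sensor points, 1-dimensional
+# output coordinates, and 16 latent features shared by branch and trunk.
+model = DeepONet(; branch=(64, 32, 32, 16), trunk=(1, 8, 8, 16))
+
+rng = Random.default_rng()
+ps, st = Lux.setup(rng, model)
+
+u = rand(Float32, 64, 5)     # 5 input functions, each sampled at 64 sensors
+y = rand(Float32, 1, 10, 5)  # 10 query locations per sample
+
+G_u, st = model((u, y), ps, st)  # predicted output values at the query locations
+```
+
+Schematically, the branch and trunk codes combine as follows.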
+
+
+```math
+\begin{align*}
+u(y) \xrightarrow{\text{branch}} & \; b \\
+& \quad \searrow\\
+&\quad \quad \mathcal{G}_{\theta} u(y) = \sum_k b_k t_k \\
+& \quad \nearrow \\
+y \; \; \xrightarrow{\text{trunk}} \; \; & t
+\end{align*}
+```
 
 ## Usage
 
-## docs
+## API
 ```@docs
 DeepONet
 ```
\ No newline at end of file
diff --git a/docs/src/tutorials/fno.md b/docs/src/tutorials/fno.md
index 6d70870..24dd70c 100644
--- a/docs/src/tutorials/fno.md
+++ b/docs/src/tutorials/fno.md
@@ -1,8 +1,30 @@
-# FNO
+# Fourier Neural Operators (FNOs)
+FNOs are a subclass of neural operators that learn a kernel integral operator $\mathcal{K}_{\theta}$, parameterized by $\theta$, between function spaces:
+
+```math
+(\mathcal{K}_{\theta}u)(x) = \int_D \kappa_{\theta}(a(x), a(y), x, y) \, u(y) \, dy \quad \forall x \in D
+```
+
+The kernel operator forms one block of the network; the hidden representation $v^{(t)}(x)$ is passed to the next block as:
+```math
+v^{(t+1)}(x) = \sigma((W^{(t)}v^{(t)} + \mathcal{K}^{(t)}v^{(t)})(x))
+```
+
+FNOs choose a translation-invariant kernel $\kappa_{\theta}(x, y) = \kappa_{\theta}(x - y)$, which turns the integral into a convolution that can be computed efficiently in the Fourier domain:
+
+```math
+\begin{align*}
+(\mathcal{K}_{\theta}u)(x)
+&= \int_D \kappa_{\theta}(x - y) \, u(y) \, dy \quad \forall x \in D\\
+&= \mathcal{F}^{-1}(\mathcal{F}(\kappa_{\theta}) \, \mathcal{F}(u))(x) \quad \forall x \in D
+\end{align*}
+```
+where $\mathcal{F}$ denotes the Fourier transform. In practice, not all modes in the frequency domain are used; the higher modes are usually truncated.
 
 ## Usage
 
-## docs
+
+## API
 ```@docs
 FourierNeuralOperator
 ```
\ No newline at end of file
diff --git a/docs/src/tutorials/nomad.md b/docs/src/tutorials/nomad.md
new file mode 100644
index 0000000..ad27c5a
--- /dev/null
+++ b/docs/src/tutorials/nomad.md
@@ -0,0 +1,21 @@
+# Nonlinear Manifold Decoders for Operator Learning (NOMADs)
+NOMADs are similar to DeepONets in that they can learn operators whose input and output function spaces are defined on different domains. Their architecture is different, however: nonlinearity is applied to the latent codes to obtain the operator approximation.
+The architecture involves an approximator $\mathcal{A}$ that encodes the input function into a latent code $\beta$, which is directly concatenated with the output coordinate $y$ and passed into a decoder net $\mathcal{D}$ to give the output function at that coordinate.
+
+```math
+\begin{align*}
+u(y) \xrightarrow{\mathcal{A}} & \; \beta \\
+& \quad \searrow\\
+&\quad \quad \mathcal{G}_{\theta} u(y) = \mathcal{D}(\beta, y) \\
+& \quad \nearrow \\
+y
+\end{align*}
+```
+
+
+## Usage
+
+## API
+```@docs
+NOMAD
+```
\ No newline at end of file
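+
+As with the other architectures, `NOMAD` is a `Lux.jl` layer. The sketch below is illustrative only: it assumes that a `NOMAD` can be built from an approximator and a decoder network and called on a `(u, y)` tuple, as the diagram above suggests; the layer sizes are arbitrary, and the exact constructor and input layout should be checked against the `NOMAD` docstring above.
+
+```julia
+using NeuralOperators, Lux, Random
+
+# Assumed sizes: input functions sampled at 8 points, a 4-dimensional latent
+# code β, and 1-dimensional output coordinates (so the decoder sees 4 + 1 = 5 inputs).
+approximator = Chain(Dense(8 => 32, tanh), Dense(32 => 4))
+decoder = Chain(Dense(5 => 16, tanh), Dense(16 => 1))
+model = NOMAD(approximator, decoder)
+
+rng = Random.default_rng()
+ps, st = Lux.setup(rng, model)
+
+u = rand(Float32, 8, 10)  # 10 input functions sampled at 8 points
+y = rand(Float32, 1, 10)  # one output coordinate per sample
+
+out, st = model((u, y), ps, st)
+```
+
+Unlike the DeepONet output $\sum_k b_k t_k$, which is linear in the latent code, the decoder here applies a nonlinear map to the concatenated $(\beta, y)$.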