From e260ff93a1659c5e231ee8a6d6bf80b432801047 Mon Sep 17 00:00:00 2001 From: Carlo Lucibello Date: Thu, 5 Dec 2024 17:08:39 +0100 Subject: [PATCH] improve GNNLux docs (#542) --- .gitignore | 1 + GNNGraphs/docs/make.jl | 8 +- GNNLux/docs/Project.toml | 4 +- GNNLux/docs/make.jl | 95 +++++++++++++++---- GNNLux/docs/src/api/basic.md | 2 +- GNNLux/docs/src/api/conv.md | 1 - GNNLux/docs/src/api/temporalconv.md | 2 - GNNLux/docs/src/guides/models.md | 87 +++++++++++++++++ GNNLux/docs/src/index.md | 14 ++- GraphNeuralNetworks/docs/make.jl | 6 +- GraphNeuralNetworks/docs/src/api/basic.md | 2 - GraphNeuralNetworks/docs/src/api/conv.md | 1 - .../docs/src/api/heteroconv.md | 2 - GraphNeuralNetworks/docs/src/api/pool.md | 2 - .../docs/src/api/temporalconv.md | 1 - GraphNeuralNetworks/docs/src/guides/models.md | 17 +--- GraphNeuralNetworks/docs/src/index.md | 6 +- 17 files changed, 193 insertions(+), 58 deletions(-) create mode 100644 GNNLux/docs/src/guides/models.md diff --git a/.gitignore b/.gitignore index 37a94c162..d7d1ab579 100644 --- a/.gitignore +++ b/.gitignore @@ -18,3 +18,4 @@ GraphNeuralNetworks/docs/build GraphNeuralNetworks/docs/src/GNNGraphs GraphNeuralNetworks/docs/src/GNNlib tutorials/docs/build +prova.jl diff --git a/GNNGraphs/docs/make.jl b/GNNGraphs/docs/make.jl index 94cecf6bf..a4d83a2b2 100644 --- a/GNNGraphs/docs/make.jl +++ b/GNNGraphs/docs/make.jl @@ -30,11 +30,9 @@ makedocs(; "Home" => "index.md", "Guides" => [ - "Graphs" => [ - "guides/gnngraph.md", - "guides/heterograph.md", - "guides/temporalgraph.md" - ], + "Graphs" => "guides/gnngraph.md", + "Heterogeneous Graphs" => "guides/heterograph.md", + "Temporal Graphs" => "guides/temporalgraph.md", "Datasets" => "guides/datasets.md", ], diff --git a/GNNLux/docs/Project.toml b/GNNLux/docs/Project.toml index 97644b929..462d54b23 100644 --- a/GNNLux/docs/Project.toml +++ b/GNNLux/docs/Project.toml @@ -1,6 +1,8 @@ [deps] Documenter = "e30172f5-a6a5-5a46-863b-614d45cd2de4" DocumenterInterLinks = "d12716ef-a0f6-4df4-a9f1-a5a34e75c656" +GNNGraphs = "aed8fd31-079b-4b5a-b342-a13352159b8c" GNNLux = "e8545f4d-a905-48ac-a8c4-ca114b98986d" GNNlib = "a6a84749-d869-43f8-aacc-be26a1996e48" -LiveServer = "16fef848-5104-11e9-1b77-fb7a48bbb589" +Graphs = "86223c79-3864-5bf0-83f7-82e725a168b6" +Lux = "b2108857-7c20-44ae-9111-449ecde12c47" diff --git a/GNNLux/docs/make.jl b/GNNLux/docs/make.jl index d8016d51d..6aaaf3de7 100644 --- a/GNNLux/docs/make.jl +++ b/GNNLux/docs/make.jl @@ -1,32 +1,85 @@ using Documenter -using DocumenterInterLinks -using GNNlib using GNNLux +using Lux, GNNGraphs, GNNlib, Graphs +using DocumenterInterLinks +DocMeta.setdocmeta!(GNNLux, :DocTestSetup, :(using GNNLux); recursive = true) +mathengine = MathJax3(Dict(:loader => Dict("load" => ["[tex]/require", "[tex]/mathtools"]), + :tex => Dict("inlineMath" => [["\$", "\$"], ["\\(", "\\)"]], + "packages" => [ + "base", + "ams", + "autoload", + "mathtools", + "require" + ]))) -assets=[] -prettyurls = get(ENV, "CI", nothing) == "true" -mathengine = MathJax3() interlinks = InterLinks( - "GNNGraphs" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNGraphs/", joinpath(dirname(dirname(@__DIR__)), "GNNGraphs", "docs", "build", "objects.inv")), - "GNNlib" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNlib/", joinpath(dirname(dirname(@__DIR__)), "GNNlib", "docs", "build", "objects.inv"))) - + "NNlib" => "https://fluxml.ai/NNlib.jl/stable/", + # "GNNGraphs" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNGraphs/", 
joinpath(dirname(dirname(@__DIR__)), "GNNGraphs", "docs", "build", "objects.inv")), + # "GNNlib" => ("https://carlolucibello.github.io/GraphNeuralNetworks.jl/GNNlib/", joinpath(dirname(dirname(@__DIR__)), "GNNlib", "docs", "build", "objects.inv")) + ) + +# Copy the docs from GNNGraphs and GNNlib. Will be removed at the end of the script +cp(joinpath(@__DIR__, "../../GNNGraphs/docs/src"), + joinpath(@__DIR__, "src/GNNGraphs"), force=true) +cp(joinpath(@__DIR__, "../../GNNlib/docs/src"), + joinpath(@__DIR__, "src/GNNlib"), force=true) + makedocs(; - modules = [GNNLux], - doctest = false, - clean = true, + modules = [GNNLux, GNNGraphs, GNNlib], + doctest = false, # TODO: enable doctest plugins = [interlinks], - format = Documenter.HTML(; mathengine, prettyurls, assets = assets, size_threshold=nothing), + format = Documenter.HTML(; mathengine, + prettyurls = get(ENV, "CI", nothing) == "true", + assets = [], + size_threshold=nothing, + size_threshold_warn=2000000), sitename = "GNNLux.jl", - pages = ["Home" => "index.md", - "API Reference" => [ - "Basic" => "api/basic.md", - "Convolutional layers" => "api/conv.md", - "Temporal Convolutional layers" => "api/temporalconv.md",], - ] - ) - + pages = [ + + "Home" => "index.md", + + "Guides" => [ + "Graphs" => "GNNGraphs/guides/gnngraph.md", + "Message Passing" => "GNNlib/guides/messagepassing.md", + "Models" => "guides/models.md", + "Datasets" => "GNNGraphs/guides/datasets.md", + "Heterogeneous Graphs" => "GNNGraphs/guides/heterograph.md", + "Temporal Graphs" => "GNNGraphs/guides/temporalgraph.md", + ], + + "API Reference" => [ + "Graphs (GNNGraphs.jl)" => [ + "GNNGraph" => "GNNGraphs/api/gnngraph.md", + "GNNHeteroGraph" => "GNNGraphs/api/heterograph.md", + "TemporalSnapshotsGNNGraph" => "GNNGraphs/api/temporalgraph.md", + "Samplers" => "GNNGraphs/api/samplers.md", + ] + + "Message Passing (GNNlib.jl)" => [ + "Message Passing" => "GNNlib/api/messagepassing.md", + "Other Operators" => "GNNlib/api/utils.md", + ] + + "Layers" => [ + "Basic layers" => "api/basic.md", + "Convolutional layers" => "api/conv.md", + # "Pooling layers" => "api/pool.md", + "Temporal Convolutional layers" => "api/temporalconv.md", + # "Hetero Convolutional layers" => "api/heteroconv.md", + ] + ], + + # "Developer guide" => "dev.md", + ], +) + +rm(joinpath(@__DIR__, "src/GNNGraphs"), force=true, recursive=true) +rm(joinpath(@__DIR__, "src/GNNlib"), force=true, recursive=true) -deploydocs(;repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", devbranch = "master", dirname = "GNNLux") \ No newline at end of file +deploydocs(repo = "github.com/JuliaGraphs/GraphNeuralNetworks.jl.git", + devbranch = "master", + dirname = "GNNLux") \ No newline at end of file diff --git a/GNNLux/docs/src/api/basic.md b/GNNLux/docs/src/api/basic.md index f61419095..623455340 100644 --- a/GNNLux/docs/src/api/basic.md +++ b/GNNLux/docs/src/api/basic.md @@ -3,7 +3,7 @@ CurrentModule = GNNLux CollapsedDocStrings = true ``` -## Basic Layers +# Basic Layers ```@docs GNNLayer diff --git a/GNNLux/docs/src/api/conv.md b/GNNLux/docs/src/api/conv.md index 73c8df069..146510d05 100644 --- a/GNNLux/docs/src/api/conv.md +++ b/GNNLux/docs/src/api/conv.md @@ -35,7 +35,6 @@ The table below lists all graph convolutional layers implemented in the *GNNLux. 
 | [`SAGEConv`](@ref)           | ✓                |              |              | ✓           | ✓          |
 | [`SGConv`](@ref)             | ✓                |              |              |             | ✓          |
 
-## Docs
 
 ```@autodocs
 Modules = [GNNLux]
diff --git a/GNNLux/docs/src/api/temporalconv.md b/GNNLux/docs/src/api/temporalconv.md
index d1b9fbb1c..98c9dfa03 100644
--- a/GNNLux/docs/src/api/temporalconv.md
+++ b/GNNLux/docs/src/api/temporalconv.md
@@ -7,8 +7,6 @@ CollapsedDocStrings = true
 ```
 
 Convolutions for time-varying graphs (temporal graphs) such as the [`TemporalSnapshotsGNNGraph`](@ref).
 
-## Docs
-
 ```@autodocs
 Modules = [GNNLux]
 Pages = ["layers/temporalconv.jl"]
diff --git a/GNNLux/docs/src/guides/models.md b/GNNLux/docs/src/guides/models.md
new file mode 100644
index 000000000..b71ddb2ff
--- /dev/null
+++ b/GNNLux/docs/src/guides/models.md
@@ -0,0 +1,87 @@
+# Models
+
+GNNLux.jl provides common graph convolutional layers that you can assemble into arbitrarily deep and complex models. GNN layers are compatible with
+Lux.jl layers, so experienced Lux users can readily define and train
+their models.
+
+In what follows, we discuss two different styles for model creation:
+the *explicit modeling* style, more verbose but more flexible,
+and the *implicit modeling* style based on [`GNNLux.GNNChain`](@ref), more concise but less flexible.
+
+## Explicit modeling
+
+In the explicit modeling style, the model is created according to the following steps:
+
+1. Define a new type for your model (`GNN` in the example below). Refer to the
+   [Lux Manual](https://lux.csail.mit.edu/dev/manual/interface#lux-interface) for the
+   definition of the type.
+2. Define a convenience constructor for your model.
+3. Define the forward pass by implementing the call method for your type.
+4. Instantiate the model.
+
+Here is an example of this construction:
+```julia
+using Lux, GNNLux
+using Zygote
+using Random, Statistics
+
+struct GNN <: AbstractLuxContainerLayer{(:conv1, :bn, :conv2, :dropout, :dense)} # step 1
+    conv1
+    bn
+    conv2
+    dropout
+    dense
+end
+
+function GNN(din::Int, d::Int, dout::Int) # step 2
+    GNN(GraphConv(din => d),
+        BatchNorm(d),
+        GraphConv(d => d, relu),
+        Dropout(0.5),
+        Dense(d, dout))
+end
+
+function (model::GNN)(g::GNNGraph, x, ps, st) # step 3
+    x, st_conv1 = model.conv1(g, x, ps.conv1, st.conv1)
+    x, st_bn = model.bn(x, ps.bn, st.bn)
+    x = relu.(x)
+    x, st_conv2 = model.conv2(g, x, ps.conv2, st.conv2)
+    x, st_drop = model.dropout(x, ps.dropout, st.dropout)
+    x, st_dense = model.dense(x, ps.dense, st.dense)
+    return x, (conv1=st_conv1, bn=st_bn, conv2=st_conv2, dropout=st_drop, dense=st_dense)
+end
+
+din, d, dout = 3, 4, 2
+model = GNN(din, d, dout) # step 4
+rng = Random.default_rng()
+ps, st = Lux.setup(rng, model)
+g = rand_graph(rng, 10, 30)
+X = randn(Float32, din, 10)
+
+st = Lux.testmode(st)
+y, st = model(g, X, ps, st)
+st = Lux.trainmode(st)
+grad = Zygote.gradient(ps -> mean(model(g, X, ps, st)[1]), ps)[1]
+```
+
+## Implicit modeling with GNNChains
+
+While very flexible, the way we defined the `GNN` model in the last section is a bit verbose.
+In order to simplify things, we provide the [`GNNLux.GNNChain`](@ref) type. It is very similar
+to Lux's well-known `Chain`: it composes layers in a sequential fashion,
+propagating the output of each layer to the next one. In addition, `GNNChain`
+propagates the input graph as well, providing it as the first argument
+to layers subtyping the [`GNNLux.GNNLayer`](@ref) abstract type.
+
+Using `GNNChain`, the model definition becomes more concise:
+
+```julia
+model = GNNChain(GraphConv(din => d),
+                 BatchNorm(d),
+                 x -> relu.(x),
+                 GraphConv(d => d, relu),
+                 Dropout(0.5),
+                 Dense(d, dout))
+```
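+
+A `GNNChain` is itself a Lux layer, so it is set up and called exactly like the explicit model above. Here is a minimal sketch, reusing `g`, `X`, `rng`, and the dimensions defined in the previous example:
+
+```julia
+ps, st = Lux.setup(rng, model)
+y, st = model(g, X, ps, st)  # y has size (dout, num_nodes)
+grad = Zygote.gradient(ps -> mean(model(g, X, ps, st)[1]), ps)[1]
+```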
+
+The `GNNChain` only propagates the graph and the node features. More complex scenarios, e.g. when edge features are also updated, have to be handled using the explicit definition of the forward pass.
diff --git a/GNNLux/docs/src/index.md b/GNNLux/docs/src/index.md
index 4c72d44f1..b94f92fb1 100644
--- a/GNNLux/docs/src/index.md
+++ b/GNNLux/docs/src/index.md
@@ -1,11 +1,23 @@
 # GNNLux.jl
 
-GNNLux.jl is a work-in-progress package that implements stateless graph convolutional layers, fully compatible with the [Lux.jl](https://lux.csail.mit.edu/stable/) machine learning framework. It is built on top of the GNNGraphs.jl, GNNlib.jl, and Lux.jl packages.
+GNNLux.jl is a package that implements graph convolutional layers fully compatible with the [Lux.jl](https://lux.csail.mit.edu/stable/) deep learning framework. It is built on top of the GNNGraphs.jl, GNNlib.jl, and Lux.jl packages.
+
+See [GraphNeuralNetworks.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/graphneuralnetworks/) instead for a
+[Flux.jl](https://fluxml.ai/Flux.jl/stable/)-based implementation of graph neural networks.
+
+## Installation
+
+GNNLux.jl is a registered Julia package. You can easily install it through the package manager:
+
+```julia
+pkg> add GNNLux
+```
 
 ## Package overview
 
 Let's give a brief overview of the package by solving a graph regression problem with synthetic data.
 
+
 ### Data preparation
 
 We generate a dataset of multiple random graphs with associated data features, then split it into training and testing sets.
diff --git a/GraphNeuralNetworks/docs/make.jl b/GraphNeuralNetworks/docs/make.jl
index 1b9dad02c..ac5c2017f 100644
--- a/GraphNeuralNetworks/docs/make.jl
+++ b/GraphNeuralNetworks/docs/make.jl
@@ -43,12 +43,12 @@ makedocs(;
         "Home" => "index.md",
 
         "Guides" => [
-            "Graphs" => ["GNNGraphs/guides/gnngraph.md",
-                "GNNGraphs/guides/heterograph.md",
-                "GNNGraphs/guides/temporalgraph.md"],
+            "Graphs" => "GNNGraphs/guides/gnngraph.md",
             "Message Passing" => "GNNlib/guides/messagepassing.md",
             "Models" => "guides/models.md",
             "Datasets" => "GNNGraphs/guides/datasets.md",
+            "Heterogeneous Graphs" => "GNNGraphs/guides/heterograph.md",
+            "Temporal Graphs" => "GNNGraphs/guides/temporalgraph.md",
         ],
 
         "Tutorials" => [
diff --git a/GraphNeuralNetworks/docs/src/api/basic.md b/GraphNeuralNetworks/docs/src/api/basic.md
index 2fd05ed4f..32e643b86 100644
--- a/GraphNeuralNetworks/docs/src/api/basic.md
+++ b/GraphNeuralNetworks/docs/src/api/basic.md
@@ -5,8 +5,6 @@ CollapsedDocStrings = true
 ```
 
 # Basic Layers
 
-## Docs
-
 ```@autodocs
 Modules = [GraphNeuralNetworks]
 Pages = ["layers/basic.jl"]
diff --git a/GraphNeuralNetworks/docs/src/api/conv.md b/GraphNeuralNetworks/docs/src/api/conv.md
index 560cadec8..0418dc6e2 100644
--- a/GraphNeuralNetworks/docs/src/api/conv.md
+++ b/GraphNeuralNetworks/docs/src/api/conv.md
@@ -38,7 +38,6 @@ The table below lists all graph convolutional layers implemented in the *GraphNe
 | [`TransformerConv`](@ref)    |                  |              | ✓            |             |            |
 
-## Docs
 
 ```@autodocs
 Modules = [GraphNeuralNetworks]
diff --git a/GraphNeuralNetworks/docs/src/api/heteroconv.md b/GraphNeuralNetworks/docs/src/api/heteroconv.md
index 5248ee9da..584bf11f7 100644
--- a/GraphNeuralNetworks/docs/src/api/heteroconv.md
+++ b/GraphNeuralNetworks/docs/src/api/heteroconv.md
@@ -7,8 +7,6 @@ CollapsedDocStrings = true
 ```
 
 Heterogeneous graph convolutions are implemented in the type `HeteroGraphConv`. `HeteroGraphConv` relies on standard graph convolutional layers to perform message passing on the different relations.
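+
+As a minimal sketch (the feature sizes below are arbitrary), each relation of a bipartite heterograph can be assigned its own convolutional layer:
+
+```julia
+using Flux, GraphNeuralNetworks
+
+g = rand_bipartite_heterograph((10, 15), 20)  # node types :A and :B
+x = (A = rand(Float32, 64, 10), B = rand(Float32, 64, 15))
+
+layer = HeteroGraphConv((:A, :to, :B) => GraphConv(64 => 32, relu),
+                        (:B, :to, :A) => GraphConv(64 => 32, relu))
+
+y = layer(g, x)  # NamedTuple of updated features for node types :A and :B
+```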
 
-## Docs
-
 ```@autodocs
 Modules = [GraphNeuralNetworks]
 Pages = ["layers/heteroconv.jl"]
diff --git a/GraphNeuralNetworks/docs/src/api/pool.md b/GraphNeuralNetworks/docs/src/api/pool.md
index ac9116094..52e7cbe9b 100644
--- a/GraphNeuralNetworks/docs/src/api/pool.md
+++ b/GraphNeuralNetworks/docs/src/api/pool.md
@@ -12,8 +12,6 @@ Order = [:type, :function]
 Pages = ["pool.md"]
 ```
 
-## Docs
-
 ```@autodocs
 Modules = [GraphNeuralNetworks]
 Pages = ["layers/pool.jl"]
diff --git a/GraphNeuralNetworks/docs/src/api/temporalconv.md b/GraphNeuralNetworks/docs/src/api/temporalconv.md
index f7b1e3672..dfe322eeb 100644
--- a/GraphNeuralNetworks/docs/src/api/temporalconv.md
+++ b/GraphNeuralNetworks/docs/src/api/temporalconv.md
@@ -7,7 +7,6 @@ CollapsedDocStrings = true
 ```
 
 Convolutions for time-varying graphs (temporal graphs) such as the [`TemporalSnapshotsGNNGraph`](@ref).
 
-## Docs
 
 ```@autodocs
 Modules = [GraphNeuralNetworks]
diff --git a/GraphNeuralNetworks/docs/src/guides/models.md b/GraphNeuralNetworks/docs/src/guides/models.md
index 4a7876390..971f3f60a 100644
--- a/GraphNeuralNetworks/docs/src/guides/models.md
+++ b/GraphNeuralNetworks/docs/src/guides/models.md
@@ -33,7 +33,7 @@ end
 Flux.@layer GNN    # step 2
 
 function GNN(din::Int, d::Int, dout::Int) # step 3
-    GNN(GCNConv(din => d),
+    GNN(GraphConv(din => d),
         BatchNorm(d),
         GraphConv(d => d, relu),
         Dropout(0.5),
@@ -64,23 +64,16 @@ grad = gradient(model -> sum(model(g, X)), model)
 While very flexible, the way in which we defined `GNN` model definition in last section is a bit verbose.
 In order to simplify things, we provide the [`GraphNeuralNetworks.GNNChain`](@ref) type. It is very similar
 to Flux's well known `Chain`. It allows to compose layers in a sequential fashion as Chain
-does, propagating the output of each layer to the next one. In addition, `GNNChain`
-handles propagates the input graph as well, providing it as a first argument
+does, propagating the output of each layer to the next one. In addition, `GNNChain` propagates the input graph as well, providing it as a first argument
 to layers subtyping the [`GraphNeuralNetworks.GNNLayer`](@ref) abstract type.
 
-Using `GNNChain`, the previous example becomes
+Using `GNNChain`, the model definition becomes more concise:
 
 ```julia
-using Flux, Graphs, GraphNeuralNetworks
-
-din, d, dout = 3, 4, 2
-g = rand_graph(10, 30)
-X = randn(Float32, din, 10)
-
-model = GNNChain(GCNConv(din => d),
+model = GNNChain(GraphConv(din => d),
                  BatchNorm(d),
                  x -> relu.(x),
-                 GCNConv(d => d, relu),
+                 GraphConv(d => d, relu),
                  Dropout(0.5),
                  Dense(d, dout))
 ```
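+
+The chain is then used exactly like the hand-written model above. A minimal sketch, reusing `g` and `X` from the explicit example:
+
+```julia
+y = model(g, X)                                    # forward pass
+grad = gradient(model -> sum(model(g, X)), model)  # gradient with respect to the model
+```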
diff --git a/GraphNeuralNetworks/docs/src/index.md b/GraphNeuralNetworks/docs/src/index.md
index b810d99e9..e38514748 100644
--- a/GraphNeuralNetworks/docs/src/index.md
+++ b/GraphNeuralNetworks/docs/src/index.md
@@ -2,21 +2,21 @@
 GraphNeuralNetworks.jl is a graph neural network package based on the deep learning framework [Flux.jl](https://github.com/FluxML/Flux.jl).
 
-It provides a set of stateful graph convolutional layers and utilities to build graph neural networks.
+It provides a set of graph convolutional layers and utilities to build graph neural networks.
 
 Among its features:
 
 * Implements common graph convolutional layers.
 * Supports computations on batched graphs.
 * Easy to define custom layers.
-* CUDA support.
+* CUDA and AMDGPU support.
 * Integration with [Graphs.jl](https://github.com/JuliaGraphs/Graphs.jl).
 * [Examples](https://github.com/JuliaGraphs/GraphNeuralNetworks.jl/tree/master/GraphNeuralNetworks/examples) of node, edge, and graph level machine learning tasks.
 * Heterogeneous and temporal graphs.
 
 The package is part of a larger ecosystem of packages that includes [GNNlib.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnnlib), [GNNGraphs.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnngraphs), and [GNNLux.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnnlux).
 
-GraphNeuralNetworks.jl is the fronted package for Flux.jl users. [Lux.jl](https://lux.csail.mit.edu/stable/) users instead, can rely on GNNLux.jl (still in development).
+GraphNeuralNetworks.jl is the frontend package for Flux.jl users. [Lux.jl](https://lux.csail.mit.edu/stable/) users can rely on [GNNLux.jl](https://juliagraphs.org/GraphNeuralNetworks.jl/gnnlux/) instead.
 
 ## Installation