From 463e432e1175d6a8192793554b9fb796b6e1ddd0 Mon Sep 17 00:00:00 2001 From: Seth Bromberger Date: Fri, 8 Jun 2018 08:38:32 -0700 Subject: [PATCH] back to using for nonextended functions (#898) * back to using for nonextended functions * Update greedy_color.jl whitespace changes * Update edit_distance.jl whitespace changes --- benchmark/core.jl | 40 +++++----- src/LightGraphs.jl | 23 +++--- src/SimpleGraphs/simpledigraph.jl | 2 +- src/SimpleGraphs/simplegraph.jl | 2 +- src/centrality/betweenness.jl | 30 +++---- src/centrality/closeness.jl | 6 +- src/centrality/eigenvector.jl | 2 +- src/centrality/katz.jl | 4 +- src/centrality/radiality.jl | 6 +- src/centrality/stress.jl | 15 ++-- src/distance.jl | 6 +- src/edit_distance.jl | 98 +++++++++++------------ src/generators/euclideangraphs.jl | 4 +- src/generators/randgraphs.jl | 118 ++++++++++++++-------------- src/generators/staticgraphs.jl | 4 +- src/graphcut/normalized_cut.jl | 56 +++++++------ src/linalg/LinAlg.jl | 10 +-- src/linalg/graphmatrices.jl | 76 +++++++++--------- src/linalg/nonbacktracking.jl | 6 +- src/linalg/spectral.jl | 18 ++--- src/operators.jl | 8 +- src/persistence/common.jl | 6 +- src/shortestpaths/astar.jl | 20 ++--- src/shortestpaths/dijkstra.jl | 12 +-- src/shortestpaths/johnson.jl | 2 +- src/shortestpaths/yen.jl | 15 ++-- src/spanningtrees/kruskal.jl | 12 ++- src/spanningtrees/prim.jl | 18 ++--- src/traversals/diffusion.jl | 33 ++++---- src/traversals/greedy_color.jl | 38 ++++----- src/traversals/maxadjvisit.jl | 26 +++--- src/utils.jl | 8 +- test/biconnectivity/articulation.jl | 2 +- test/biconnectivity/biconnect.jl | 2 +- test/centrality/pagerank.jl | 44 +++++------ test/community/core-periphery.jl | 2 +- test/community/label_propagation.jl | 2 +- test/generators/binomial.jl | 5 +- test/generators/randgraphs.jl | 6 +- test/generators/staticgraphs.jl | 12 +-- test/graphcut/normalized_cut.jl | 4 +- test/linalg/graphmatrices.jl | 46 +++++------ test/linalg/runtests.jl | 4 +- test/linalg/spectral.jl | 26 +++--- test/operators.jl | 12 +-- test/runtests.jl | 1 + test/shortestpaths/astar.jl | 2 +- test/shortestpaths/bellman-ford.jl | 2 +- test/shortestpaths/dijkstra.jl | 2 +- test/shortestpaths/johnson.jl | 49 ++++++------ test/shortestpaths/yen.jl | 2 +- test/simplegraphs/simplegraphs.jl | 22 +++--- test/traversals/bipartition.jl | 2 +- 53 files changed, 468 insertions(+), 505 deletions(-) diff --git a/benchmark/core.jl b/benchmark/core.jl index a0e9e229a..dd984b476 100644 --- a/benchmark/core.jl +++ b/benchmark/core.jl @@ -1,13 +1,13 @@ function bench_iteredges(g::AbstractGraph) - i = 0 - for e in edges(g) - i += 1 - end - return i + i = 0 + for e in edges(g) + i += 1 + end + return i end function bench_has_edge(g::AbstractGraph) - Random.srand(1) + srand(1) nvg = nv(g) srcs = rand([1:nvg;], cld(nvg, 4)) dsts = rand([1:nvg;], cld(nvg, 4)) @@ -28,18 +28,18 @@ EDGEFNS = [ @benchgroup "edges" begin - for fun in EDGEFNS - @benchgroup "$fun" begin - @benchgroup "graph" begin - for (name, g) in GRAPHS - @bench "$name" $fun($g) - end - end - @benchgroup "digraph" begin - for (name, g) in DIGRAPHS - @bench "$name" $fun($g) - end - end # digraph - end # fun - end + for fun in EDGEFNS + @benchgroup "$fun" begin + @benchgroup "graph" begin + for (name, g) in GRAPHS + @bench "$name" $fun($g) + end + end + @benchgroup "digraph" begin + for (name, g) in DIGRAPHS + @bench "$name" $fun($g) + end + end # digraph + end # fun + end end # edges diff --git a/src/LightGraphs.jl b/src/LightGraphs.jl index f6af17a99..d1126e960 100644 --- 
a/src/LightGraphs.jl +++ b/src/LightGraphs.jl @@ -3,16 +3,17 @@ module LightGraphs using SimpleTraits -import CodecZlib -import DataStructures -import DelimitedFiles -import Distributed -import IterativeEigensolvers -import LinearAlgebra -import Markdown -import Random -import SharedArrays -import SparseArrays +using CodecZlib: GzipCompressorStream, GzipDecompressorStream +using DataStructures: IntDisjointSets, PriorityQueue, dequeue!, dequeue_pair!, enqueue!, heappop!, heappush!, in_same_set, peek, union! +using Distributed: @distributed +using IterativeEigensolvers: eigs +using LinearAlgebra: I, Symmetric, diagm, eigen, eigvals, norm, rmul!, tril, triu +import LinearAlgebra: Diagonal, issymmetric, mul! +# import Markdown +using Random: AbstractRNG, GLOBAL_RNG, MersenneTwister, randperm, randsubseq!, shuffle, shuffle!, srand +using SharedArrays: SharedMatrix, SharedVector, sdata +using SparseArrays: SparseMatrixCSC, nonzeros, nzrange, rowvals +import SparseArrays: blockdiag, sparse import Base: write, ==, <, *, ≈, convert, isless, issubset, union, intersect, reverse, reverse!, isassigned, getindex, setindex!, show, @@ -25,7 +26,7 @@ export AbstractGraph, AbstractEdge, AbstractEdgeIter, Edge, Graph, SimpleGraph, SimpleGraphFromIterator, DiGraph, SimpleDiGraphFromIterator, SimpleDiGraph, vertices, edges, edgetype, nv, ne, src, dst, -is_directed, +is_directed, IsDirected, has_vertex, has_edge, inneighbors, outneighbors, # core diff --git a/src/SimpleGraphs/simpledigraph.jl b/src/SimpleGraphs/simpledigraph.jl index 159a9ed61..1d6767997 100644 --- a/src/SimpleGraphs/simpledigraph.jl +++ b/src/SimpleGraphs/simpledigraph.jl @@ -31,7 +31,7 @@ SimpleDiGraph(n::T) where T<:Integer = SimpleDiGraph{T}(n) SimpleDiGraph(::Type{T}) where T<:Integer = SimpleDiGraph{T}(zero(T)) # sparse adjacency matrix constructor: SimpleDiGraph(adjmx) -function SimpleDiGraph{T}(adjmx::SparseArrays.SparseMatrixCSC{U}) where T<:Integer where U<:Real +function SimpleDiGraph{T}(adjmx::SparseMatrixCSC{U}) where T<:Integer where U<:Real dima, dimb = size(adjmx) isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square")) diff --git a/src/SimpleGraphs/simplegraph.jl b/src/SimpleGraphs/simplegraph.jl index 807159915..f779789d7 100644 --- a/src/SimpleGraphs/simplegraph.jl +++ b/src/SimpleGraphs/simplegraph.jl @@ -31,7 +31,7 @@ SimpleGraph(::Type{T}) where T <: Integer = SimpleGraph{T}(zero(T)) function SimpleGraph{T}(adjmx::AbstractMatrix) where T <: Integer dima, dimb = size(adjmx) isequal(dima, dimb) || throw(ArgumentError("Adjacency / distance matrices must be square")) - LinearAlgebra.issymmetric(adjmx) || throw(ArgumentError("Adjacency / distance matrices must be symmetric")) + issymmetric(adjmx) || throw(ArgumentError("Adjacency / distance matrices must be symmetric")) g = SimpleGraph(T(dima)) @inbounds for i in findall(triu(adjmx) .!= 0) diff --git a/src/centrality/betweenness.jl b/src/centrality/betweenness.jl index d525e0191..1cce7553f 100644 --- a/src/centrality/betweenness.jl +++ b/src/centrality/betweenness.jl @@ -27,10 +27,9 @@ bc(v) = \\frac{1}{\\mathcal{N}} \\sum_{s \\neq t \\neq v} ### References - Brandes 2001 & Brandes 2008 """ -function betweenness_centrality( - g::AbstractGraph, - vs::AbstractVector = vertices(g), - distmx::AbstractMatrix = weights(g); +function betweenness_centrality(g::AbstractGraph, + vs::AbstractVector=vertices(g), + distmx::AbstractMatrix=weights(g); normalize=true, endpoints=false) @@ -40,7 +39,7 @@ function betweenness_centrality( betweenness = zeros(n_v) 
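    # Brandes (2001): run one single-source shortest-paths pass per source in
    # `vs`, then accumulate pair dependencies via the `_accumulate_*` helpers below.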
for s in vs - if degree(g,s) > 0 # this might be 1? + if degree(g, s) > 0 # this might be 1? state = dijkstra_shortest_paths(g, s, distmx; allpaths=true, trackvertices=true) if endpoints _accumulate_endpoints!(betweenness, state, g, s) @@ -62,10 +61,9 @@ end betweenness_centrality(g::AbstractGraph, k::Integer, distmx::AbstractMatrix=weights(g); normalize=true, endpoints=false) = betweenness_centrality(g, sample(vertices(g), k), distmx; normalize=normalize, endpoints=endpoints) -function parallel_betweenness_centrality( - g::AbstractGraph, - vs::AbstractVector = vertices(g), - distmx::AbstractMatrix = weights(g); +function parallel_betweenness_centrality(g::AbstractGraph, + vs::AbstractVector=vertices(g), + distmx::AbstractMatrix=weights(g); normalize=true, endpoints=false)::Vector{Float64} @@ -75,7 +73,7 @@ function parallel_betweenness_centrality( # Parallel reduction - betweenness = Distributed.@distributed (+) for s in vs + betweenness = @distributed (+) for s in vs temp_betweenness = zeros(n_v) if degree(g, s) > 0 # this might be 1? state = dijkstra_shortest_paths(g, s, distmx; allpaths=true, trackvertices=true) @@ -100,12 +98,10 @@ end parallel_betweenness_centrality(g::AbstractGraph, k::Integer, distmx::AbstractMatrix=weights(g); normalize=true, endpoints=false) = parallel_betweenness_centrality(g, sample(vertices(g), k), distmx; normalize=normalize, endpoints=endpoints) -function _accumulate_basic!( - betweenness::Vector{Float64}, +function _accumulate_basic!(betweenness::Vector{Float64}, state::DijkstraState, g::AbstractGraph, - si::Integer - ) + si::Integer) n_v = length(state.parents) # this is the ttl number of vertices δ = zeros(n_v) @@ -130,12 +126,10 @@ function _accumulate_basic!( return nothing end -function _accumulate_endpoints!( - betweenness::Vector{Float64}, +function _accumulate_endpoints!(betweenness::Vector{Float64}, state::DijkstraState, g::AbstractGraph, - si::Integer - ) + si::Integer) n_v = nv(g) # this is the ttl number of vertices δ = zeros(n_v) diff --git a/src/centrality/closeness.jl b/src/centrality/closeness.jl index dfd40eb53..824688323 100644 --- a/src/centrality/closeness.jl +++ b/src/centrality/closeness.jl @@ -42,9 +42,9 @@ function parallel_closeness_centrality(g::AbstractGraph, n_v = Int(nv(g)) - closeness = SharedArrays.SharedVector{Float64}(n_v) + closeness = SharedVector{Float64}(n_v) - Distributed.@sync Distributed.@distributed for u in vertices(g) + @sync @distributed for u in vertices(g) if degree(g, u) == 0 # no need to do Dijkstra here closeness[u] = 0.0 else @@ -61,5 +61,5 @@ function parallel_closeness_centrality(g::AbstractGraph, end end end - return SharedArrays.sdata(closeness) + return sdata(closeness) end diff --git a/src/centrality/eigenvector.jl b/src/centrality/eigenvector.jl index 936a50c03..191734450 100644 --- a/src/centrality/eigenvector.jl +++ b/src/centrality/eigenvector.jl @@ -24,4 +24,4 @@ eigenvector of the adjacency matrix \$\\mathbf{A}\$. - Mark E. J. Newman: Networks: An Introduction. Oxford University Press, USA, 2010, pp. 169. """ -eigenvector_centrality(g::AbstractGraph) = abs.(vec(IterativeEigensolvers.eigs(adjacency_matrix(g), nev=1)[2]))::Vector{Float64} +eigenvector_centrality(g::AbstractGraph) = abs.(vec(eigs(adjacency_matrix(g), nev=1)[2]))::Vector{Float64} diff --git a/src/centrality/katz.jl b/src/centrality/katz.jl index 5f6f43dca..37bd15e23 100644 --- a/src/centrality/katz.jl +++ b/src/centrality/katz.jl @@ -30,9 +30,9 @@ the centrality calculated for each node in `g`. 
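# Sketch of the computation below: Katz centrality solves the linear system
# (I - αA)v = 𝟙 (the all-ones vector) over the in-neighbor adjacency matrix A
# and then normalizes v; the series interpretation Σₖ αᵏAᵏ𝟙 converges only
# when α < 1/λ_max(A).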
function katz_centrality(g::AbstractGraph, α::Real=0.3) nvg = nv(g) v = ones(Float64, nvg) - spI = SparseArrays.sparse(one(Float64) * LinearAlgebra.I, nvg, nvg) + spI = sparse(one(Float64) * I, nvg, nvg) A = adjacency_matrix(g, Bool; dir=:in) v = (spI - α * A) \ v - v /= LinearAlgebra.norm(v) + v /= norm(v) return v end diff --git a/src/centrality/radiality.jl b/src/centrality/radiality.jl index 11395ffb3..3b15512f7 100644 --- a/src/centrality/radiality.jl +++ b/src/centrality/radiality.jl @@ -36,10 +36,10 @@ function parallel_radiality_centrality(g::AbstractGraph)::Vector{Float64} n_v = nv(g) vs = vertices(g) n = ne(g) - meandists = SharedArrays.SharedVector{Float64}(Int(n_v)) - maxdists = SharedArrays.SharedVector{Float64}(Int(n_v)) + meandists = SharedVector{Float64}(Int(n_v)) + maxdists = SharedVector{Float64}(Int(n_v)) - Distributed.@sync Distributed.@distributed for i = 1:n_v + @sync @distributed for i = 1:n_v d = dijkstra_shortest_paths(g, vs[i]) maxdists[i] = maximum(d.dists) meandists[i] = sum(d.dists) / (n_v - 1) diff --git a/src/centrality/stress.jl b/src/centrality/stress.jl index 287c56b3d..b3ba8b13c 100644 --- a/src/centrality/stress.jl +++ b/src/centrality/stress.jl @@ -14,7 +14,7 @@ The stress centrality of a vertex ``n`` is defined as the number of shortest pat - Barabási, A.L., Oltvai, Z.N.: Network biology: understanding the cell's functional organization. Nat Rev Genet 5 (2004) 101-113 - Shimbel, A.: Structural parameters of communication networks. Bull Math Biophys 15 (1953) 501-507. """ -function stress_centrality(g::AbstractGraph, vs::AbstractVector = vertices(g)) +function stress_centrality(g::AbstractGraph, vs::AbstractVector=vertices(g)) n_v = nv(g) k = length(vs) isdir = is_directed(g) @@ -33,9 +33,8 @@ stress_centrality(g::AbstractGraph, k::Integer) = stress_centrality(g, sample(vertices(g), k)) -function parallel_stress_centrality( - g::AbstractGraph, - vs::AbstractVector = vertices(g))::Vector{Int} +function parallel_stress_centrality(g::AbstractGraph, + vs::AbstractVector=vertices(g))::Vector{Int} n_v = nv(g) k = length(vs) @@ -43,7 +42,7 @@ function parallel_stress_centrality( # Parallel reduction - stress = Distributed.@distributed (+) for s in vs + stress = @distributed (+) for s in vs temp_stress = zeros(Int, n_v) if degree(g, s) > 0 # this might be 1? 
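        # sources with no incident edges lie on no shortest paths, so the
        # Dijkstra pass and the accumulation below can be skipped for them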
state = dijkstra_shortest_paths(g, s; allpaths=true, trackvertices=true) @@ -58,12 +57,10 @@ parallel_stress_centrality(g::AbstractGraph, k::Integer) = parallel_stress_centrality(g, sample(vertices(g), k)) -function _stress_accumulate_basic!( - stress::Vector{Int}, +function _stress_accumulate_basic!(stress::Vector{Int}, state::DijkstraState, g::AbstractGraph, - si::Integer - ) + si::Integer) n_v = length(state.parents) # this is the ttl number of vertices δ = zeros(Int, n_v) diff --git a/src/distance.jl b/src/distance.jl index e53b89e18..3500f6c55 100644 --- a/src/distance.jl +++ b/src/distance.jl @@ -71,11 +71,11 @@ function parallel_eccentricity(g::AbstractGraph, vs::AbstractVector=vertices(g), distmx::AbstractMatrix{T}=weights(g)) where T <: Real vlen = length(vs) - eccs = SharedArrays.SharedVector{T}(vlen) - Distributed.@sync Distributed.@distributed for i = 1:vlen + eccs = SharedVector{T}(vlen) + @sync @distributed for i = 1:vlen eccs[i] = maximum(dijkstra_shortest_paths(g, vs[i], distmx).dists) end - d = SharedArrays.sdata(eccs) + d = sdata(eccs) maximum(d) == typemax(T) && warn("Infinite path length detected") return d end diff --git a/src/edit_distance.jl b/src/edit_distance.jl index 4bd684185..e7bcf422a 100644 --- a/src/edit_distance.jl +++ b/src/edit_distance.jl @@ -41,66 +41,66 @@ if involved costs are equivalent. - Júlio Hoffimann Mendes (juliohm@stanford.edu) """ function edit_distance(G₁::AbstractGraph, G₂::AbstractGraph; - insert_cost::Function = v -> 1.0, - delete_cost::Function = u -> 1.0, - subst_cost::Function= (u, v) -> 0.5, - heuristic::Function=DefaultEditHeuristic) + insert_cost::Function=v -> 1.0, + delete_cost::Function=u -> 1.0, + subst_cost::Function=(u, v) -> 0.5, + heuristic::Function=DefaultEditHeuristic) # A* search heuristic - h(λ) = heuristic(λ, G₁, G₂) + h(λ) = heuristic(λ, G₁, G₂) # initialize open set - OPEN = DataStructures.PriorityQueue{Vector{Tuple}, Float64}() - for v in 1:nv(G₂) - DataStructures.enqueue!(OPEN, [(1, v)], subst_cost(1, v) + h([(1, v)])) - end - DataStructures.enqueue!(OPEN, [(1, 0)], delete_cost(1) + h([(1, 0)])) - - while true - # minimum (partial) edit path - λ, cost = DataStructures.peek(OPEN) - DataStructures.dequeue!(OPEN) - - if is_complete_path(λ, G₁, G₂) - return cost, λ - else - k, _ = λ[end] - vs = setdiff(1:nv(G₂), [v for (u, v) in λ]) - - if k < nv(G₁) # there are still vertices to process in G₁? - for v in vs - λ⁺ = [λ; (k + 1, v)] - DataStructures.enqueue!(OPEN, λ⁺, cost + subst_cost(k + 1, v) + h(λ⁺) - h(λ)) + OPEN = PriorityQueue{Vector{Tuple},Float64}() + for v in 1:nv(G₂) + enqueue!(OPEN, [(1, v)], subst_cost(1, v) + h([(1, v)])) + end + enqueue!(OPEN, [(1, 0)], delete_cost(1) + h([(1, 0)])) + + while true + # minimum (partial) edit path + λ, cost = peek(OPEN) + dequeue!(OPEN) + + if is_complete_path(λ, G₁, G₂) + return cost, λ + else + k, _ = λ[end] + vs = setdiff(1:nv(G₂), [v for (u, v) in λ]) + + if k < nv(G₁) # there are still vertices to process in G₁? 
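+                    # branch the A* search: match vertex k + 1 of G₁ with each
+                    # still-unmatched vertex of G₂, or pair it with 0 (deletion)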
+ for v in vs + λ⁺ = [λ; (k + 1, v)] + enqueue!(OPEN, λ⁺, cost + subst_cost(k + 1, v) + h(λ⁺) - h(λ)) + end + λ⁺ = [λ; (k + 1, 0)] + enqueue!(OPEN, λ⁺, cost + delete_cost(k + 1) + h(λ⁺) - h(λ)) + else + # add remaining vertices of G₂ to the path + λ⁺ = [λ; [(0, v) for v in vs]] + total_insert_cost = sum(insert_cost, vs) + enqueue!(OPEN, λ⁺, cost + total_insert_cost + h(λ⁺) - h(λ)) + end end - λ⁺ = [λ; (k + 1, 0)] - DataStructures.enqueue!(OPEN, λ⁺, cost + delete_cost(k + 1) + h(λ⁺) - h(λ)) - else - # add remaining vertices of G₂ to the path - λ⁺ = [λ; [(0, v) for v in vs]] - total_insert_cost = sum(insert_cost, vs) - DataStructures.enqueue!(OPEN, λ⁺, cost + total_insert_cost + h(λ⁺) - h(λ)) - end end - end end function is_complete_path(λ, G₁, G₂) - us = Set(); vs = Set() - for (u, v) in λ - push!(us, u) - push!(vs, v) - end - delete!(us, 0) - delete!(vs, 0) - - return length(us) == nv(G₁) && length(vs) == nv(G₂) + us = Set(); vs = Set() + for (u, v) in λ + push!(us, u) + push!(vs, v) + end + delete!(us, 0) + delete!(vs, 0) + + return length(us) == nv(G₁) && length(vs) == nv(G₂) end function DefaultEditHeuristic(λ, G₁::AbstractGraph, G₂::AbstractGraph) - vs = Set([v for (u, v) in λ]) - delete!(vs, 0) + vs = Set([v for (u, v) in λ]) + delete!(vs, 0) - return nv(G₂) - length(vs) + return nv(G₂) - length(vs) end @@ -119,7 +119,7 @@ vertex v ∈ G₂. `p=1`: the p value for p-norm calculation. """ function MinkowskiCost(μ₁::AbstractVector, μ₂::AbstractVector; p::Real=1) - (u, v) -> LinearAlgebra.norm(μ₁[u] - μ₂[v], p) + (u, v) -> norm(μ₁[u] - μ₂[v], p) end """ @@ -132,5 +132,5 @@ Return value similar to `MinkowskiCost`, but ensure costs smaller than 2τ. `τ=1`: value specifying half of the upper limit of the Minkowski cost. """ function BoundedMinkowskiCost(μ₁::AbstractVector, μ₂::AbstractVector; p::Real=1, τ::Real=1) - (u, v) -> 1 / (1 / (2τ) + exp(-LinearAlgebra.norm(μ₁[u] - μ₂[v], p))) + (u, v) -> 1 / (1 / (2τ) + exp(-norm(μ₁[u] - μ₂[v], p))) end diff --git a/src/generators/euclideangraphs.jl b/src/generators/euclideangraphs.jl index 36f8ef856..67df4d224 100644 --- a/src/generators/euclideangraphs.jl +++ b/src/generators/euclideangraphs.jl @@ -8,7 +8,7 @@ a matrix with the points' positions. function euclidean_graph(N::Int, d::Int; L=1., seed = -1, kws...) 
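    # sample N points uniformly at random from the cube [0,L]^d, then build
    # the graph from pairwise distances via the positional method below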
rng = LightGraphs.getRNG(seed) - points = LinearAlgebra.rmul!(rand(rng, d, N), L) + points = rmul!(rand(rng, d, N), L) return (euclidean_graph(points; L=L, kws...)..., points) end @@ -49,7 +49,7 @@ function euclidean_graph(points::Matrix; else throw(ArgumentError("$bc is not a valid boundary condition")) end - dist = LinearAlgebra.norm(Δ, p) + dist = norm(Δ, p) if dist < cutoff e = Edge(i, j) add_edge!(g, e) diff --git a/src/generators/randgraphs.jl b/src/generators/randgraphs.jl index ac05d80c2..4e3be0476 100644 --- a/src/generators/randgraphs.jl +++ b/src/generators/randgraphs.jl @@ -1,4 +1,4 @@ -function SimpleGraph{T}(nv::Integer, ne::Integer; seed::Int = -1) where T <: Integer +function SimpleGraph{T}(nv::Integer, ne::Integer; seed::Int=-1) where T <: Integer tnv = T(nv) maxe = div(Int(nv) * (nv - 1), 2) @assert(ne <= maxe, "Maximum number of edges for this graph is $maxe") @@ -15,10 +15,10 @@ function SimpleGraph{T}(nv::Integer, ne::Integer; seed::Int = -1) where T <: Int return g end -Graph(nv::T, ne::Integer; seed::Int = -1) where T<: Integer = +Graph(nv::T, ne::Integer; seed::Int=-1) where T <: Integer = Graph{T}(nv, ne, seed=seed) -function SimpleDiGraph{T}(nv::Integer, ne::Integer; seed::Int = -1) where T<:Integer +function SimpleDiGraph{T}(nv::Integer, ne::Integer; seed::Int=-1) where T <: Integer tnv = T(nv) maxe = Int(nv) * (nv - 1) @assert(ne <= maxe, "Maximum number of edges for this graph is $maxe") @@ -34,7 +34,7 @@ function SimpleDiGraph{T}(nv::Integer, ne::Integer; seed::Int = -1) where T<:Int return g end -SimpleDiGraph(nv::T, ne::Integer; seed::Int = -1) where T<:Integer = +SimpleDiGraph(nv::T, ne::Integer; seed::Int=-1) where T <: Integer = SimpleDiGraph{Int}(nv, ne, seed=seed) """ @@ -107,36 +107,36 @@ from the expected values are likely. - Connected Components in Random Graphs with Given Expected Degree Sequences, Linyuan Lu and Fan Chung. [https://link.springer.com/article/10.1007%2FPL00012580](https://link.springer.com/article/10.1007%2FPL00012580) - Efficient Generation of Networks with Given Expected Degrees, Joel C. Miller and Aric Hagberg. 
[https://doi.org/10.1007/978-3-642-21286-4_10](https://doi.org/10.1007/978-3-642-21286-4_10) """ -function expected_degree_graph(ω::Vector{T}; seed::Int=-1) where T<:Real +function expected_degree_graph(ω::Vector{T}; seed::Int=-1) where T <: Real g = Graph(length(ω)) expected_degree_graph!(g, ω, seed=seed) end -function expected_degree_graph!(g::Graph, ω::Vector{T}; seed::Int=-1) where T<:Real +function expected_degree_graph!(g::Graph, ω::Vector{T}; seed::Int=-1) where T <: Real n = length(ω) - @assert all(zero(T) .<= ω .<= n-one(T)) "Elements of ω needs to be at least 0 and at most n-1" + @assert all(zero(T) .<= ω .<= n - one(T)) "Elements of ω needs to be at least 0 and at most n-1" π = sortperm(ω, rev=true) rng = getRNG(seed) S = sum(ω) - for u=1:(n-1) - v = u+1 - p = min(ω[π[u]]*ω[π[v]]/S, one(T)) - while v <= n && p > zero(p) - if p != one(T) - v += floor(Int, log(rand(rng))/log(one(T)-p)) - end - if v <= n - q = min(ω[π[u]]*ω[π[v]]/S, one(T)) - if rand(rng) < q/p - add_edge!(g, π[u], π[v]) - end - p = q - v += 1 - end - end + for u = 1:(n - 1) + v = u + 1 + p = min(ω[π[u]] * ω[π[v]] / S, one(T)) + while v <= n && p > zero(p) + if p != one(T) + v += floor(Int, log(rand(rng)) / log(one(T) - p)) + end + if v <= n + q = min(ω[π[u]] * ω[π[v]] / S, one(T)) + if rand(rng) < q / p + add_edge!(g, π[u], π[v]) + end + p = q + v += 1 + end + end end return g end @@ -153,7 +153,7 @@ randomized per the model based on probability `β`. - `is_directed=false`: if true, return a directed graph. - `seed=-1`: set the RNG seed. """ -function watts_strogatz(n::Integer, k::Integer, β::Real; is_directed=false, seed::Int = -1) +function watts_strogatz(n::Integer, k::Integer, β::Real; is_directed=false, seed::Int=-1) @assert k < n / 2 if is_directed g = SimpleDiGraph(n) @@ -185,7 +185,7 @@ function watts_strogatz(n::Integer, k::Integer, β::Real; is_directed=false, see return g end -function _suitable(edges::Set{Edge}, potential_edges::Dict{T,T}) where T<:Integer +function _suitable(edges::Set{Edge}, potential_edges::Dict{T,T}) where T <: Integer isempty(potential_edges) && return true list = keys(potential_edges) for s1 in list, s2 in list @@ -195,9 +195,9 @@ function _suitable(edges::Set{Edge}, potential_edges::Dict{T,T}) where T<:Intege return false end -_try_creation(n::Integer, k::Integer, rng::Random.AbstractRNG) = _try_creation(n, fill(k, n), rng) +_try_creation(n::Integer, k::Integer, rng::AbstractRNG) = _try_creation(n, fill(k, n), rng) -function _try_creation(n::T, k::Vector{T}, rng::Random.AbstractRNG) where T<:Integer +function _try_creation(n::T, k::Vector{T}, rng::AbstractRNG) where T <: Integer edges = Set{Edge}() m = 0 stubs = zeros(T, sum(k)) @@ -211,7 +211,7 @@ function _try_creation(n::T, k::Vector{T}, rng::Random.AbstractRNG) where T<:Int while !isempty(stubs) potential_edges = Dict{T,T}() - Random.shuffle!(rng, stubs) + shuffle!(rng, stubs) for i in 1:2:length(stubs) s1, s2 = stubs[i:(i + 1)] if (s1 > s2) @@ -269,14 +269,14 @@ Initial graphs are undirected and consist of isolated vertices by default. - `complete=false`: if true, use a complete graph for the initial graph. - `seed=-1`: set the RNG seed. """ -function barabasi_albert(n::Integer, n0::Integer, k::Integer; is_directed::Bool = false, complete::Bool = false, seed::Int = -1) +function barabasi_albert(n::Integer, n0::Integer, k::Integer; is_directed::Bool=false, complete::Bool=false, seed::Int=-1) if complete g = is_directed ? CompleteDiGraph(n0) : CompleteGraph(n0) else g = is_directed ? 
SimpleDiGraph(n0) : SimpleGraph(n0) end - barabasi_albert!(g, n, k; seed = seed) + barabasi_albert!(g, n, k; seed=seed) return g end @@ -298,7 +298,7 @@ function barabasi_albert!(g::AbstractGraph, n::Integer, k::Integer; seed::Int=-1 n0 == n && return g # seed random number generator - seed > 0 && Random.srand(seed) + seed > 0 && srand(seed) # add missing vertices sizehint!(g.fadjlist, n) @@ -373,7 +373,7 @@ Time complexity is ``\\mathcal{O}(|V| + |E| log |E|)``. ### References - Goh K-I, Kahng B, Kim D: Universal behaviour of load distribution in scale-free networks. Phys Rev Lett 87(27):278701, 2001. """ -function static_fitness_model(m::Integer, fitness::Vector{T}; seed::Int=-1) where T<:Real +function static_fitness_model(m::Integer, fitness::Vector{T}; seed::Int=-1) where T <: Real m < 0 && throw(ArgumentError("number of edges must be positive")) n = length(fitness) m == 0 && return SimpleGraph(n) @@ -409,7 +409,7 @@ Time complexity is ``\\mathcal{O}(|V| + |E| log |E|)``. ### References - Goh K-I, Kahng B, Kim D: Universal behaviour of load distribution in scale-free networks. Phys Rev Lett 87(27):278701, 2001. """ -function static_fitness_model(m::Integer, fitness_out::Vector{T}, fitness_in::Vector{S}; seed::Int=-1) where T<:Real where S<:Real +function static_fitness_model(m::Integer, fitness_out::Vector{T}, fitness_in::Vector{S}; seed::Int=-1) where T <: Real where S <: Real m < 0 && throw(ArgumentError("number of edges must be positive")) n = length(fitness_out) length(fitness_in) != n && throw(ArgumentError("fitness_in must have the same size as fitness_out")) @@ -433,7 +433,7 @@ function static_fitness_model(m::Integer, fitness_out::Vector{T}, fitness_in::Ve return g end -function _create_static_fitness_graph!(g::AbstractGraph, m::Integer, cum_fitness_out::Vector{T}, cum_fitness_in::Vector{S}, seed::Int) where T<:Real where S<:Real +function _create_static_fitness_graph!(g::AbstractGraph, m::Integer, cum_fitness_out::Vector{T}, cum_fitness_in::Vector{S}, seed::Int) where T <: Real where S <: Real rng = getRNG(seed) max_out = cum_fitness_out[end] max_in = cum_fitness_in[end] @@ -503,7 +503,7 @@ function static_scale_free(n::Integer, m::Integer, α_out::Real, α_in::Float64; fitness_out = _construct_fitness(n, α_out, finite_size_correction) fitness_in = _construct_fitness(n, α_in, finite_size_correction) # eliminate correlation - Random.shuffle!(fitness_in) + shuffle!(fitness_in) static_fitness_model(m, fitness_out, fitness_in, seed=seed) end @@ -513,7 +513,7 @@ function _construct_fitness(n::Integer, α::Real, finite_size_correction::Bool) j = float(n) if finite_size_correction && α < -0.5 # See the Cho et al paper, first page first column + footnote 7 - j += n^(1 + 1 / 2α) * (10sqrt(2) * (1 + α))^(-1 / α) - 1 + j += n^(1 + 1 / (2 * α)) * (10 * sqrt(2) * (1 + α))^(-1 / α) - 1 end j = max(j, n) @inbounds for i = 1:n @@ -582,7 +582,7 @@ Time complexity is approximately ``\\mathcal{O}(n \\bar{k}^2)``. ### Implementation Notes Allocates an array of ``n \\bar{k}`` `Int`s.
""" -function random_configuration_model(n::Integer, k::Array{T}; seed::Int=-1, check_graphical::Bool=false) where T<:Integer +function random_configuration_model(n::Integer, k::Array{T}; seed::Int=-1, check_graphical::Bool=false) where T <: Integer n != length(k) && throw(ArgumentError("a degree sequence of length n must be provided")) m = sum(k) !iseven(m) && throw(ArgumentError("sum(k) must be even")) @@ -639,13 +639,13 @@ function random_regular_digraph(n::Integer, k::Integer; dir::Symbol=:out, seed:: for r in 1:n l = ((r - 1) * k + 1):(r * k) I[l] .= r - J[l] = sample!(rng, cs, k, exclude = r) + J[l] = sample!(rng, cs, k, exclude=r) end if dir == :out - return SimpleDiGraph(SparseArrays.sparse(I, J, V, n, n)) + return SimpleDiGraph(sparse(I, J, V, n, n)) else - return SimpleDiGraph(SparseArrays.sparse(I, J, V, n, n)') + return SimpleDiGraph(sparse(I, J, V, n, n)') end end @@ -664,7 +664,7 @@ function random_tournament_digraph(n::Integer; seed::Int=-1) rng = getRNG(seed) g = SimpleDiGraph(n) - for i = 1:n, j = i+1:n + for i = 1:n, j = i + 1:n rand(rng, Bool) ? add_edge!(g, Edge(i, j)) : add_edge!(g, Edge(j, i)) end @@ -687,7 +687,7 @@ Return a Graph generated according to the Stochastic Block Model (SBM). For a dynamic version of the SBM see the [`StochasticBlockModel`](@ref) type and related functions. """ -function stochastic_block_model(c::Matrix{T}, n::Vector{U}; seed::Int = -1) where T<:Real where U<:Integer +function stochastic_block_model(c::Matrix{T}, n::Vector{U}; seed::Int=-1) where T <: Real where U <: Integer size(c, 1) == size(c, 2) == length(n) || throw(ArgumentError("matrix-vector size mismatch")) # init dsfmt generator without altering GLOBAL_RNG @@ -728,7 +728,7 @@ end Return a Graph generated according to the Stochastic Block Model (SBM), sampling from an SBM with ``c_{a,a}=cint``, and ``c_{a,b}=cext``. """ -function stochastic_block_model(cint::T, cext::T, n::Vector{U}; seed::Int=-1) where T<:Real where U<:Integer +function stochastic_block_model(cint::T, cext::T, n::Vector{U}; seed::Int=-1) where T <: Real where U <: Integer K = length(n) c = [ifelse(a == b, cint, cext) for a = 1:K, b = 1:K] stochastic_block_model(c, n, seed=seed) @@ -751,11 +751,11 @@ block `k` and any vertex in block `l`. Graphs are generated by taking random ``i,j ∈ V`` and flipping a coin with probability `affinities[nodemap[i],nodemap[j]]`. """ -mutable struct StochasticBlockModel{T<:Integer,P<:Real} +mutable struct StochasticBlockModel{T <: Integer,P <: Real} n::T nodemap::Array{T} affinities::Matrix{P} - rng::Random.MersenneTwister + rng::MersenneTwister end ==(sbm::StochasticBlockModel, other::StochasticBlockModel) = @@ -765,7 +765,7 @@ end # A constructor for StochasticBlockModel that uses the sizes of the blocks # and the affinity matrix. This construction implies that consecutive # vertices will be in the same blocks, except for the block boundaries. -function StochasticBlockModel(sizes::AbstractVector, affinities::AbstractMatrix; seed::Int = -1) +function StochasticBlockModel(sizes::AbstractVector, affinities::AbstractMatrix; seed::Int=-1) csum = cumsum(sizes) j = 1 nodemap = zeros(Int, csum[end]) @@ -786,10 +786,10 @@ end Produce the sbm affinity matrix with internal probabilities `internalp` and external probabilities `externalp`. 
""" -function sbmaffinity(internalp::Vector{T}, externalp::Real, sizes::Vector{U}) where T<:Real where U<:Integer +function sbmaffinity(internalp::Vector{T}, externalp::Real, sizes::Vector{U}) where T <: Real where U <: Integer numblocks = length(sizes) numblocks == length(internalp) || throw(ArgumentError("Inconsistent input dimensions: internalp, sizes")) - B = LinearAlgebra.diagm(0=>internalp) + externalp * (ones(numblocks, numblocks) - LinearAlgebra.I) + B = diagm(0 => internalp) + externalp * (ones(numblocks, numblocks) - I) return B end @@ -797,20 +797,20 @@ function StochasticBlockModel(internalp::Real, externalp::Real, size::Integer, numblocks::Integer; - seed::Int = -1) + seed::Int=-1) sizes = fill(size, numblocks) B = sbmaffinity(fill(internalp, numblocks), externalp, sizes) StochasticBlockModel(sizes, B, seed=seed) end function StochasticBlockModel(internalp::Vector{T}, externalp::Real, - sizes::Vector{U}; seed::Int = -1) where T<:Real where U<:Integer + sizes::Vector{U}; seed::Int=-1) where T <: Real where U <: Integer B = sbmaffinity(internalp, externalp, sizes) return StochasticBlockModel(sizes, B, seed=seed) end -const biclique = ones(2, 2) - Matrix{Float64}(LinearAlgebra.I, 2, 2) +const biclique = ones(2, 2) - Matrix{Float64}(I, 2, 2) #TODO: this documentation needs work. sbromberger 20170326 """ @@ -824,16 +824,16 @@ This is a specific type of SBM with ``\\frac{k}{2} blocks each with two halves. Each half is connected as a random bipartite graph with probability `intra` The blocks are connected with probability `between`. """ -function nearbipartiteaffinity(sizes::Vector{T}, between::Real, intra::Real) where T<:Integer +function nearbipartiteaffinity(sizes::Vector{T}, between::Real, intra::Real) where T <: Integer numblocks = div(length(sizes), 2) - return kron(between * Matrix{Float64}(LinearAlgebra.I, numblocks, numblocks), biclique) + Matrix{Float64}(LinearAlgebra.I, 2*numblocks, 2*numblocks) * intra + return kron(between * Matrix{Float64}(I, numblocks, numblocks), biclique) + Matrix{Float64}(I, 2 * numblocks, 2 * numblocks) * intra end #Return a generator for edges from a stochastic block model near-bipartite graph. -nearbipartiteaffinity(sizes::Vector{T}, between::Real, inter::Real, noise::Real) where T<:Integer = +nearbipartiteaffinity(sizes::Vector{T}, between::Real, inter::Real, noise::Real) where T <: Integer = nearbipartiteaffinity(sizes, between, inter) .+ noise -nearbipartiteSBM(sizes, between, inter, noise; seed::Int = -1) = +nearbipartiteSBM(sizes, between, inter, noise; seed::Int=-1) = StochasticBlockModel(sizes, nearbipartiteaffinity(sizes, between, inter, noise), seed=seed) """ @@ -841,7 +841,7 @@ nearbipartiteSBM(sizes, between, inter, noise; seed::Int = -1) = Generate a stream of random pairs in `1:n` using random number generator `RNG`. 
""" -function random_pair(rng::Random.AbstractRNG, n::Integer) +function random_pair(rng::AbstractRNG, n::Integer) f(ch) = begin while true put!(ch, Edge(rand(rng, 1:n), rand(rng, 1:n))) @@ -897,7 +897,7 @@ function blockcounts(sbm::StochasticBlockModel, A::AbstractMatrix) I = collect(1:sbm.n) J = [sbm.nodemap[i] for i in 1:sbm.n] V = ones(sbm.n) - Q = SparseArrays.sparse(I, J, V) + Q = sparse(I, J, V) # Q = Q / Q'Q # @show Q'Q# < 1e-6 return (Q'A) * Q @@ -938,10 +938,10 @@ function kronecker(SCALE, edgefactor, A=0.57, B=0.19, C=0.19) ij .+= 2^(ib - 1) .* (hcat(ii_bit, jj_bit)) end - p = Random.randperm(N) + p = randperm(N) ij = p[ij] - p = Random.randperm(M) + p = randperm(M) ij = ij[p, :] g = SimpleDiGraph(N) diff --git a/src/generators/staticgraphs.jl b/src/generators/staticgraphs.jl index 8ffa45629..f35379cf0 100644 --- a/src/generators/staticgraphs.jl +++ b/src/generators/staticgraphs.jl @@ -222,7 +222,7 @@ Create a double complete binary tree with `k` levels. function DoubleBinaryTree(k::Integer) gl = BinaryTree(k) gr = BinaryTree(k) - g = SparseArrays.blockdiag(gl, gr) + g = blockdiag(gl, gr) add_edge!(g, 1, nv(gl) + 1) return g end @@ -241,7 +241,7 @@ function RoachGraph(k::Integer) nopole = SimpleGraph(2) antannae = crosspath(k, nopole) body = crosspath(k, dipole) - roach = SparseArrays.blockdiag(antannae, body) + roach = blockdiag(antannae, body) add_edge!(roach, nv(antannae) - 1, nv(antannae) + 1) add_edge!(roach, nv(antannae), nv(antannae) + 2) return roach diff --git a/src/graphcut/normalized_cut.jl b/src/graphcut/normalized_cut.jl index cee7b260c..5ac2d2841 100644 --- a/src/graphcut/normalized_cut.jl +++ b/src/graphcut/normalized_cut.jl @@ -8,24 +8,24 @@ function _normalized_cut_cost(cut, W::AbstractMatrix, D) end end end - cut_cost/=2 - return cut_cost/sum(D*cut) + cut_cost/sum(D*(.~cut)) + cut_cost /= 2 + return cut_cost / sum(D * cut) + cut_cost / sum(D * (.~cut)) end -function _normalized_cut_cost(cut, W::SparseArrays.SparseMatrixCSC, D) +function _normalized_cut_cost(cut, W::SparseMatrixCSC, D) cut_cost = 0 - rows = SparseArrays.rowvals(W) - vals = SparseArrays.nonzeros(W) + rows = rowvals(W) + vals = nonzeros(W) n = size(W, 2) for i = 1:n - for j in SparseArrays.nzrange(W, i) + for j in nzrange(W, i) row = rows[j] if cut[i] != cut[row] - cut_cost += vals[j]/2 + cut_cost += vals[j] / 2 end end end - return cut_cost/sum(D*cut) + cut_cost/sum(D*(.~cut)) + return cut_cost / sum(D * cut) + cut_cost / sum(D * (.~cut)) end function _partition_weightmx(cut, W::AbstractMatrix) @@ -41,11 +41,11 @@ function _partition_weightmx(cut, W::AbstractMatrix) if cut[i] == false newvid[i] = j1 vmap1[j1] = i - j1+=1 + j1 += 1 else newvid[i] = j2 vmap2[j2] = i - j2+=1 + j2 += 1 end end @@ -65,7 +65,7 @@ function _partition_weightmx(cut, W::AbstractMatrix) return (W1, W2, vmap1, vmap2) end -function _partition_weightmx(cut, W::SparseArrays.SparseMatrixCSC) +function _partition_weightmx(cut, W::SparseMatrixCSC) nv = length(cut) nv2 = sum(cut) nv1 = nv - nv2 @@ -78,21 +78,21 @@ function _partition_weightmx(cut, W::SparseArrays.SparseMatrixCSC) if cut[i] == false newvid[i] = j1 vmap1[j1] = i - j1+=1 + j1 += 1 else newvid[i] = j2 vmap2[j2] = i - j2+=1 + j2 += 1 end end - rows = SparseArrays.rowvals(W) - vals = SparseArrays.nonzeros(W) + rows = rowvals(W) + vals = nonzeros(W) I1 = Vector{Int}(); I2 = Vector{Int}() J1 = Vector{Int}(); J2 = Vector{Int}() V1 = Vector{Float64}(); V2 = Vector{Float64}() for i = 1:nv - for j in SparseArrays.nzrange(W, i) + for j in nzrange(W, i) row = rows[j] if cut[i] 
== cut[row] == false push!(I1, newvid[i]) @@ -105,33 +105,33 @@ function _partition_weightmx(cut, W::SparseArrays.SparseMatrixCSC) end end end - W1 = SparseArrays.sparse(I1, J1, V1) - W2 = SparseArrays.sparse(I2, J2, V2) + W1 = sparse(I1, J1, V1) + W2 = sparse(I2, J2, V2) return (W1, W2, vmap1, vmap2) end function _recursive_normalized_cut(W, thres=thres, num_cuts=num_cuts) m, n = size(W) - D = LinearAlgebra.Diagonal(vec(sum(W, dims=2))) + D = Diagonal(vec(sum(W, dims=2))) m == 1 && return [1] #get eigenvector corresponding to second smallest eigenvalue - # v = IterativeEigensolvers.eigs(D-W, D, nev=2, which=:SR)[2][:,2] + # v = eigs(D-W, D, nev=2, which=:SR)[2][:,2] # At least some versions of ARPACK have a bug, this is a workaround invDroot = sqrt.(inv(D)) # equal to Cholesky factorization for diagonal D if n > 10 - ret = IterativeEigensolvers.eigs(invDroot'*(D-W)*invDroot, nev=2, which=:SR)[2][:,2] + ret = eigs(invDroot' * (D - W) * invDroot, nev=2, which=:SR)[2][:,2] else - ret = LinearAlgebra.eigen(Matrix(invDroot'*(D-W)*invDroot)).vectors[:,2] + ret = eigen(Matrix(invDroot' * (D - W) * invDroot)).vectors[:,2] end - v = invDroot*ret + v = invDroot * ret #perform n-cuts with different partitions of v and find best one min_cost = Inf best_thres = -1 for t in range(minimum(v), stop=maximum(v), length=num_cuts) - cut = v.>t + cut = v .> t cost = _normalized_cut_cost(cut, W, D) if cost < min_cost min_cost = cost @@ -141,7 +141,7 @@ function _recursive_normalized_cut(W, thres=thres, num_cuts=num_cuts) if min_cost < thres #split graph, compute normalized_cut for each subgraph recursively and merge indices. - cut = v.>best_thres + cut = v .> best_thres W1, W2, vmap1, vmap2 = _partition_weightmx(cut, W) labels1 = _recursive_normalized_cut(W1, thres, num_cuts) labels2 = _recursive_normalized_cut(W2, thres, num_cuts) @@ -178,12 +178,10 @@ It is important to identify a good threshold for your application. A bisection s ### References "Normalized Cuts and Image Segmentation" - Jianbo Shi and Jitendra Malik """ -function normalized_cut( - g::AbstractGraph, +function normalized_cut(g::AbstractGraph, thres::Real, W::AbstractMatrix{T}=adjacency_matrix(g), - num_cuts::Int = 10 - ) where T <: Real + num_cuts::Int=10) where T <: Real return _recursive_normalized_cut(W, thres, num_cuts) end diff --git a/src/linalg/LinAlg.jl b/src/linalg/LinAlg.jl index 987d11ed1..b5794e9ad 100644 --- a/src/linalg/LinAlg.jl +++ b/src/linalg/LinAlg.jl @@ -1,13 +1,13 @@ module LinAlg using SimpleTraits -import SparseArrays -import LinearAlgebra -import IterativeEigensolvers +using SparseArrays: SparseMatrixCSC +import SparseArrays: blockdiag, sparse +using LinearAlgebra: I, Symmetric, diagm, dot, eigen, eigvals, norm, rmul!, tril, triu +import LinearAlgebra: Diagonal, diag, issymmetric, mul! 
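+# Note the split above: `using Mod: f` brings a name into scope for calling
+# only, while the names under `import` are extended with new methods in this
+# module. A minimal sketch of the distinction (`MyOp` is a hypothetical type):
+#
+#   using LinearAlgebra: issymmetric
+#   issymmetric(::MyOp) = false   # ERROR: must be explicitly imported to be extended
+#
+#   import LinearAlgebra: issymmetric
+#   issymmetric(::MyOp) = false   # OK: adds a method to the generic function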
+using IterativeEigensolvers: eigs using ..LightGraphs -import LightGraphs: IsDirected, AbstractGraph, inneighbors, -outneighbors, all_neighbors, is_directed, nv, ne, has_edge, vertices import Base: convert, size, eltype, ndims, ==, *, .*, length diff --git a/src/linalg/graphmatrices.jl b/src/linalg/graphmatrices.jl index 4f9f071aa..502bdfb1d 100644 --- a/src/linalg/graphmatrices.jl +++ b/src/linalg/graphmatrices.jl @@ -1,4 +1,4 @@ -const SparseMatrix{T} = SparseArrays.SparseMatrixCSC{T,Int64} +const SparseMatrix{T} = SparseMatrixCSC{T,Int64} """ GraphMatrix{T} @@ -85,14 +85,14 @@ function AveragingAdjacency(adjmat::CombinatorialAdjacency) return AveragingAdjacency(adjmat, sf) end -perron(adjmat::NormalizedAdjacency) = sqrt.(adjmat.A.D) / LinearAlgebra.norm(sqrt.(adjmat.A.D)) +perron(adjmat::NormalizedAdjacency) = sqrt.(adjmat.A.D) / norm(sqrt.(adjmat.A.D)) struct PunchedAdjacency{T} <: Adjacency{T} A::NormalizedAdjacency{T} perron::Vector{T} end function PunchedAdjacency(adjmat::CombinatorialAdjacency) - perron = sqrt.(adjmat.D) / LinearAlgebra.norm(sqrt.(adjmat.D)) + perron = sqrt.(adjmat.D) / norm(sqrt.(adjmat.D)) return PunchedAdjacency(NormalizedAdjacency(adjmat), perron) end @@ -111,7 +111,7 @@ struct Noop end Broadcast.broadcasted(::typeof(*), ::Noop, x) = x -LinearAlgebra.Diagonal(::Noop) = Noop() +Diagonal(::Noop) = Noop() ==(g::GraphMatrix, h::GraphMatrix) = typeof(g) == typeof(h) && (g.A == h.A) @@ -166,10 +166,10 @@ arrayfunctions = (:eltype, :length, :ndims, :size, :strides) for f in arrayfunctions @eval $f(a::GraphMatrix) = $f(a.A) end -LinearAlgebra.issymmetric(a::GraphMatrix) = LinearAlgebra.issymmetric(a.A) +issymmetric(a::GraphMatrix) = issymmetric(a.A) size(a::GraphMatrix, i::Integer) = size(a.A, i) -LinearAlgebra.issymmetric(::StochasticAdjacency) = false -LinearAlgebra.issymmetric(::AveragingAdjacency) = false +issymmetric(::StochasticAdjacency) = false +issymmetric(::AveragingAdjacency) = false """ degrees(adjmat) @@ -194,20 +194,20 @@ convert(::Type{CombinatorialAdjacency}, adjmat::Adjacency) = adjmat.A convert(::Type{CombinatorialAdjacency}, adjmat::CombinatorialAdjacency) = adjmat -function SparseArrays.sparse(lapl::M) where M <: Laplacian +function sparse(lapl::M) where M <: Laplacian adjmat = adjacency(lapl) - A = SparseArrays.sparse(adjmat) - L = SparseArrays.sparse(LinearAlgebra.Diagonal(SparseArrays.diag(lapl))) - A + A = sparse(adjmat) + L = sparse(Diagonal(diag(lapl))) - A return L end function SparseMatrix(lapl::M) where M <: GraphMatrix - return SparseArrays.sparse(lapl) + return sparse(lapl) end -function SparseArrays.sparse(adjmat::Adjacency) - A = SparseArrays.sparse(adjmat.A) - return LinearAlgebra.Diagonal(prescalefactor(adjmat)) * (A * LinearAlgebra.Diagonal(postscalefactor(adjmat))) +function sparse(adjmat::Adjacency) + A = sparse(adjmat.A) + return Diagonal(prescalefactor(adjmat)) * (A * Diagonal(postscalefactor(adjmat))) end @@ -215,12 +215,12 @@ end function convert(::Type{SparseMatrix{T}}, lapl::Laplacian{T}) where T adjmat = adjacency(lapl) A = convert(SparseMatrix{T}, adjmat) - L = SparseArrays.sparse(LinearAlgebra.Diagonal(SparseArrays.diag(lapl))) - A + L = sparse(Diagonal(diag(lapl))) - A return L end -SparseArrays.diag(lapl::CombinatorialLaplacian) = lapl.A.D -SparseArrays.diag(lapl::Laplacian) = ones(size(lapl)[2]) +diag(lapl::CombinatorialLaplacian) = lapl.A.D +diag(lapl::Laplacian) = ones(size(lapl)[2]) *(x::AbstractArray, ::Noop) = x *(::Noop, x) = x @@ -231,46 +231,46 @@ SparseArrays.diag(lapl::Laplacian) = ones(size(lapl)[2]) 
adjmat.A * x *(lapl::Laplacian{T}, x::AbstractVector{T}) where T <: Number = - (SparseArrays.diag(lapl) .* x) - (adjacency(lapl) * x) + (diag(lapl) .* x) - (adjacency(lapl) * x) function *(adjmat::PunchedAdjacency{T}, x::AbstractVector{T}) where T <: Number y = adjmat.A * x - return y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron + return y - dot(adjmat.perron, y) * adjmat.perron end -function LinearAlgebra.mul!(Y, A::Adjacency, B) +function mul!(Y, A::Adjacency, B) # we need to do 3 matrix products # Y and B can't overlap in any one call to mul! # The last call to mul! must be (Y, postscalefactor, tmp) # so we need to write to tmp in the second step must be (tmp, A.A, Y) # and the first step (Y, prescalefactor, B) - tmp1 = LinearAlgebra.Diagonal(prescalefactor(A)) * B + tmp1 = Diagonal(prescalefactor(A)) * B tmp = similar(Y) - LinearAlgebra.mul!(tmp, A.A, tmp1) - return LinearAlgebra.mul!(Y, LinearAlgebra.Diagonal(postscalefactor(A)), tmp) + mul!(tmp, A.A, tmp1) + return mul!(Y, Diagonal(postscalefactor(A)), tmp) end -LinearAlgebra.mul!(Y, A::CombinatorialAdjacency, B) = LinearAlgebra.mul!(Y, A.A, B) +mul!(Y, A::CombinatorialAdjacency, B) = mul!(Y, A.A, B) # You can compute the StochasticAdjacency product without allocating a similar of Y. # This is true for all Adjacency where the postscalefactor is a Noop # at time of writing this is just StochasticAdjacency and CombinatorialAdjacency -function LinearAlgebra.mul!(Y, A::StochasticAdjacency, B) - tmp = LinearAlgebra.Diagonal(prescalefactor(A)) * B - LinearAlgebra.mul!(Y, A.A, tmp) +function mul!(Y, A::StochasticAdjacency, B) + tmp = Diagonal(prescalefactor(A)) * B + mul!(Y, A.A, tmp) return Y end -function LinearAlgebra.mul!(Y, adjmat::PunchedAdjacency, x) +function mul!(Y, adjmat::PunchedAdjacency, x) y = adjmat.A * x - Y[:] = y - LinearAlgebra.dot(adjmat.perron, y) * adjmat.perron + Y[:] = y - dot(adjmat.perron, y) * adjmat.perron return Y end -function LinearAlgebra.mul!(Y, lapl::Laplacian, B) - LinearAlgebra.mul!(Y, lapl.A, B) - z = SparseArrays.diag(lapl) .* B +function mul!(Y, lapl::Laplacian, B) + mul!(Y, lapl.A, B) + z = diag(lapl) .* B Y[:] = z - Y[:] return Y end @@ -284,21 +284,21 @@ Return a symmetric version of graph (represented by sparse matrix `A`) as a spar """ function symmetrize(A::SparseMatrix, which=:or) if which == :or - M = A + SparseArrays.sparse(A') + M = A + sparse(A') M.nzval[M.nzval .== 2] .= 1 return M end T = A if which == :triu - T = LinearAlgebra.triu(A) + T = triu(A) elseif which == :tril - T = LinearAlgebra.tril(A) + T = tril(A) elseif which == :sum T = A else throw(ArgumentError("$which is not a supported method of symmetrizing a matrix")) end - M = T + SparseArrays.sparse(T') + M = T + sparse(T') return M end @@ -317,9 +317,9 @@ symmetrize(adjmat::CombinatorialAdjacency, which=:or) = # per #564 -# @deprecate LinearAlgebra.mul!(Y, A::Noop, B) None +# @deprecate mul!(Y, A::Noop, B) None @deprecate convert(::Type{Adjacency}, lapl::Laplacian) None -@deprecate convert(::Type{SparseMatrix}, adjmat::GraphMatrix) SparseArrays.sparse(adjmat) +@deprecate convert(::Type{SparseMatrix}, adjmat::GraphMatrix) sparse(adjmat) diff --git a/src/linalg/nonbacktracking.jl b/src/linalg/nonbacktracking.jl index 6ce6ff0f9..38741e2b9 100644 --- a/src/linalg/nonbacktracking.jl +++ b/src/linalg/nonbacktracking.jl @@ -85,7 +85,7 @@ end size(nbt::Nonbacktracking) = (nbt.m, nbt.m) eltype(nbt::Nonbacktracking) = Float64 -LinearAlgebra.issymmetric(nbt::Nonbacktracking) = false +issymmetric(nbt::Nonbacktracking) = false 
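# The `*` and `mul!` methods below keep the operator matrix-free, so iterative
# solvers can use it without materializing the 2m×2m matrix. A minimal sketch,
# assuming a graph `g` is in scope (power iteration for the leading eigenvector):
#
#   nbt = Nonbacktracking(g)
#   x = ones(Float64, nbt.m)
#   for _ in 1:100
#       x = nbt * x      # matrix-free product defined below
#       x /= norm(x)
#   end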
function *(nbt::Nonbacktracking, x::Vector{T}) where T <: Number length(x) == nbt.m || error("dimension mismatch") @@ -100,7 +100,7 @@ function *(nbt::Nonbacktracking, x::Vector{T}) where T <: Number end return y end -function LinearAlgebra.mul!(C, nbt::Nonbacktracking, B) +function mul!(C, nbt::Nonbacktracking, B) # computs C = A * B for i in 1:size(B, 2) C[:, i] = nbt * B[:, i] @@ -126,7 +126,7 @@ function coo_sparse(nbt::Nonbacktracking) return I, J, 1.0 end -SparseArrays.sparse(nbt::Nonbacktracking) = SparseArrays.sparse(coo_sparse(nbt)..., nbt.m, nbt.m) +sparse(nbt::Nonbacktracking) = sparse(coo_sparse(nbt)..., nbt.m, nbt.m) function *(nbt::Nonbacktracking, x::AbstractMatrix) y = zero(x) diff --git a/src/linalg/spectral.jl b/src/linalg/spectral.jl index 31c5504cb..2c9d19ac4 100644 --- a/src/linalg/spectral.jl +++ b/src/linalg/spectral.jl @@ -49,7 +49,7 @@ function _adjacency_matrix(g::AbstractGraph{U}, T::DataType, neighborfn::Functio colpt[j + 1] = colpt[j] + length(dsts) append!(rowval, sort!(dsts)) end - spmx = SparseArrays.SparseMatrixCSC(n_v, n_v, colpt, rowval, ones(T, nz)) + spmx = SparseMatrixCSC(n_v, n_v, colpt, rowval, ones(T, nz)) # this is inefficient. There should be a better way of doing this. # the issue is that adjacency matrix entries for self-loops are 2, @@ -80,7 +80,7 @@ function laplacian_matrix(g::AbstractGraph{U}, T::DataType=Int; dir::Symbol=:uns dir = is_directed(g) ? :both : :out end A = adjacency_matrix(g, T; dir=dir) - D = convert(SparseArrays.SparseMatrixCSC{T,U}, LinearAlgebra.Diagonal(SparseArrays.sparse(sum(A, dims=2)[:]))) + D = convert(SparseMatrixCSC{T,U}, Diagonal(sparse(sum(A, dims=2)[:]))) return D - A end @@ -98,10 +98,10 @@ by vertex. Default values for `T` are the same as those in Converts the matrix to dense with ``nv^2`` memory usage. ### Implementation Notes -Use `IterativeEigensolvers.eigs(laplacian_matrix(g); kwargs...)` to compute some of the +Use `eigs(laplacian_matrix(g); kwargs...)` to compute some of the eigenvalues/eigenvectors. """ -laplacian_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) = LinearAlgebra.eigvals(Matrix(laplacian_matrix(g, T; dir=dir))) +laplacian_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) = eigvals(Matrix(laplacian_matrix(g, T; dir=dir))) """ Return the eigenvalues of the adjacency matrix for a graph `g`, indexed @@ -115,14 +115,14 @@ by vertex. Default values for `T` are the same as those in Converts the matrix to dense with ``nv^2`` memory usage. ### Implementation Notes -Use `IterativeEigensolvers.eigs(adjacency_matrix(g); kwargs...)` to compute some of the +Use `eigs(adjacency_matrix(g); kwargs...)` to compute some of the eigenvalues/eigenvectors. """ function adjacency_spectrum(g::AbstractGraph, T::DataType=Int; dir::Symbol=:unspec) if dir == :unspec dir = is_directed(g) ? :both : :out end - return LinearAlgebra.eigvals(Matrix(adjacency_matrix(g, T; dir=dir))) + return eigvals(Matrix(adjacency_matrix(g, T; dir=dir))) end """ @@ -162,7 +162,7 @@ function incidence_matrix(g::AbstractGraph, T::DataType=Int; oriented=false) end end - spmx = SparseArrays.SparseMatrixCSC(n_v, n_e, colpt, rowval, nzval) + spmx = SparseMatrixCSC(n_v, n_e, colpt, rowval, nzval) return spmx end @@ -183,8 +183,8 @@ function spectral_distance end A₁ = adjacency_matrix(G₁) A₂ = adjacency_matrix(G₂) - λ₁ = k < nv(G₁) - 1 ? IterativeEigensolvers.eigs(A₁, nev=k, which=:LR)[1] : LinearAlgebra.eigvals(Matrix(A₁))[end:-1:(end - (k - 1))] - λ₂ = k < nv(G₂) - 1 ? 
IterativeEigensolvers.eigs(A₂, nev=k, which=:LR)[1] : LinearAlgebra.eigvals(Matrix(A₂))[end:-1:(end - (k - 1))] + λ₁ = k < nv(G₁) - 1 ? eigs(A₁, nev=k, which=:LR)[1] : eigvals(Matrix(A₁))[end:-1:(end - (k - 1))] + λ₂ = k < nv(G₂) - 1 ? eigs(A₂, nev=k, which=:LR)[1] : eigvals(Matrix(A₂))[end:-1:(end - (k - 1))] return sum(abs, (λ₁ - λ₂)) end diff --git a/src/operators.jl b/src/operators.jl index 8e2c07165..1c961c414 100644 --- a/src/operators.jl +++ b/src/operators.jl @@ -72,7 +72,7 @@ edges where the vertices an edges from graph `h` are appended to graph `g`. Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ -function SparseArrays.blockdiag(g::T, h::T) where T <: AbstractGraph +function blockdiag(g::T, h::T) where T <: AbstractGraph gnv = nv(g) r = T(gnv + nv(h)) for e in edges(g) @@ -189,7 +189,7 @@ Preserves the eltype of the input graph. Will error if the number of vertices in the generated graph exceeds the eltype. """ function join(g::T, h::T) where T <: AbstractGraph - r = SparseArrays.blockdiag(g, h) + r = blockdiag(g, h) for i in vertices(g) for j = (nv(g) + 1):(nv(g) + nv(h)) add_edge!(r, i, j) @@ -276,11 +276,11 @@ sum(g::AbstractGraph) = ne(g) Return the default adjacency matrix of `g`. """ -SparseArrays.sparse(g::AbstractGraph) = adjacency_matrix(g) +sparse(g::AbstractGraph) = adjacency_matrix(g) length(g::AbstractGraph) = nv(g) * nv(g) ndims(g::AbstractGraph) = 2 -LinearAlgebra.issymmetric(g::AbstractGraph) = !is_directed(g) +issymmetric(g::AbstractGraph) = !is_directed(g) """ cartesian_product(g, h) diff --git a/src/persistence/common.jl b/src/persistence/common.jl index abbd6e98f..536afafd6 100644 --- a/src/persistence/common.jl +++ b/src/persistence/common.jl @@ -52,7 +52,7 @@ function auto_decompress(io::IO) end reset(io) if format == :gzip - io = CodecZlib.GzipDecompressorStream(io) + io = GzipDecompressorStream(io) end return io end @@ -74,7 +74,7 @@ function savegraph(fn::AbstractString, g::AbstractGraph, gname::AbstractString, io = open(fn, "w") try if compress - io = CodecZlib.GzipCompressorStream(io) + io = GzipCompressorStream(io) end return savegraph(io, g, gname, format) catch @@ -106,7 +106,7 @@ function savegraph(fn::AbstractString, d::Dict{T,U}, io = open(fn, "w") try if compress - io = CodecZlib.GzipCompressorStream(io) + io = GzipCompressorStream(io) end return savegraph(io, d, format) catch diff --git a/src/shortestpaths/astar.jl b/src/shortestpaths/astar.jl index a803c8786..fc2561a13 100644 --- a/src/shortestpaths/astar.jl +++ b/src/shortestpaths/astar.jl @@ -3,17 +3,15 @@ # A* shortest-path algorithm -function a_star_impl!( - g::AbstractGraph,# the graph +function a_star_impl!(g::AbstractGraph,# the graph t::Integer, # the end vertex frontier, # an initialized heap containing the active vertices colormap::Vector{Int}, # an (initialized) color-map to indicate status of vertices distmx::AbstractMatrix, - heuristic::Function # heuristic fn (under)estimating distance to target - ) + heuristic::Function) while !isempty(frontier) - (cost_so_far, path, u) = DataStructures.dequeue!(frontier) + (cost_so_far, path, u) = dequeue!(frontier) if u == t return path end @@ -25,7 +23,7 @@ function a_star_impl!( colormap[v] = 1 new_path = cat(path, Edge(u, v), dims=1) path_cost = cost_so_far + dist - DataStructures.enqueue!(frontier, + enqueue!(frontier, (path_cost, new_path, v), path_cost + heuristic(v)) end @@ -44,16 +42,14 @@ An optional heuristic function and edge distance matrix may be supplied. 
If missing, the distance matrix is set to [`LightGraphs.DefaultDistance`](@ref) and the heuristic is set to `n -> 0`. """ -function a_star( - g::AbstractGraph{U}, # the g s::Integer, # the start vertex t::Integer, # the end vertex - distmx::AbstractMatrix{T} = weights(g), - heuristic::Function = n -> 0 - ) where T where U +function a_star(g::AbstractGraph{U}, # the g s::Integer, # the start vertex t::Integer, # the end vertex + distmx::AbstractMatrix{T}=weights(g), + heuristic::Function=n -> 0) where T where U # heuristic (under)estimating distance to target - frontier = DataStructures.PriorityQueue{Tuple{T,Vector{Edge},U}, T}() + frontier = PriorityQueue{Tuple{T,Vector{Edge},U},T}() frontier[(zero(T), Vector{Edge}(), s)] = zero(T) colormap = zeros(Int, nv(g)) colormap[s] = 1 diff --git a/src/shortestpaths/dijkstra.jl index 4d2d612f7..08ed2e4b7 100644 --- a/src/shortestpaths/dijkstra.jl +++ b/src/shortestpaths/dijkstra.jl @@ -35,7 +35,7 @@ function dijkstra_shortest_paths(g::AbstractGraph, preds = fill(Vector{U}(), nvg) visited = zeros(Bool, nvg) pathcounts = zeros(Int, nvg) - H = DataStructures.PriorityQueue{U,T}() + H = PriorityQueue{U,T}() dists[srcs] .= zero(T) pathcounts[srcs] .= 1 @@ -49,7 +49,7 @@ function dijkstra_shortest_paths(g::AbstractGraph, end while !isempty(H) - hentry = DataStructures.dequeue_pair!(H) + hentry = dequeue_pair!(H) # info("Popped H - got $(hentry.vertex)") u = hentry[1] @@ -135,15 +135,15 @@ function parallel_multisource_dijkstra_shortest_paths(g::AbstractGraph{U}, r_v = length(sources) # TODO: remove `Int` once julialang/#23029 / #23032 are resolved - dists = SharedArrays.SharedMatrix{T}(Int(r_v), Int(n_v)) - parents = SharedArrays.SharedMatrix{U}(Int(r_v), Int(n_v)) + dists = SharedMatrix{T}(Int(r_v), Int(n_v)) + parents = SharedMatrix{U}(Int(r_v), Int(n_v)) - Distributed.@sync Distributed.@distributed for i in 1:r_v + @sync @distributed for i in 1:r_v state = dijkstra_shortest_paths(g, sources[i], distmx) dists[i, :] = state.dists parents[i, :] = state.parents end - result = MultipleDijkstraState(SharedArrays.sdata(dists), SharedArrays.sdata(parents)) + result = MultipleDijkstraState(sdata(dists), sdata(parents)) return result end diff --git a/src/shortestpaths/johnson.jl index d83f44e42..b37b1ac30 100644 --- a/src/shortestpaths/johnson.jl +++ b/src/shortestpaths/johnson.jl @@ -42,7 +42,7 @@ function johnson_shortest_paths(g::AbstractGraph{U}, wt_transform = bellman_ford_shortest_paths(g, vertices(g), distmx).dists if !type_distmx.mutable && type_distmx != LightGraphs.DefaultDistance - distmx = SparseArrays.sparse(distmx) #Change reference, not value + distmx = sparse(distmx) #Change reference, not value end #Weight transform not needed if all weights are positive. diff --git a/src/shortestpaths/yen.jl index 029d327d4..d9832289c 100644 --- a/src/shortestpaths/yen.jl +++ b/src/shortestpaths/yen.jl @@ -3,7 +3,7 @@ Designed for Yen k-shortest-paths calculations. """ -struct YenState{T,U<:Integer} <: AbstractPathState +struct YenState{T,U <: Integer} <: AbstractPathState dists::Vector{T} paths::Vector{Vector{U}} end @@ -16,8 +16,7 @@ Perform [Yen's algorithm](http://en.wikipedia.org/wiki/Yen%27s_algorithm) on a graph, computing the `K` shortest distances between `source` and `target`. Return a [`YenState`](@ref) that contains distances and paths.
""" -function yen_k_shortest_paths( - g::AbstractGraph, +function yen_k_shortest_paths(g::AbstractGraph, source::U, target::U, distmx::AbstractMatrix{T}=weights(g), @@ -33,7 +32,7 @@ function yen_k_shortest_paths( dists = Array{T,1}() push!(dists, dj.dists[target]) A = [path] - B = DataStructures.PriorityQueue() + B = PriorityQueue() gcopy = deepcopy(g) for k = 1:(K - 1) @@ -81,7 +80,7 @@ function yen_k_shortest_paths( distpath = distrootpath + djspur.dists[target] # Add the potential k-shortest path to the heap if !haskey(B, pathtotal) - DataStructures.enqueue!(B, pathtotal, distpath) + enqueue!(B, pathtotal, distpath) end end @@ -92,11 +91,11 @@ function yen_k_shortest_paths( # No more paths in B isempty(B) && break - mindistB = DataStructures.peek(B)[2] + mindistB = peek(B)[2] # The path with minimum distance in B is higher than maxdist mindistB > maxdist && break - push!(dists, DataStructures.peek(B)[2]) - push!(A, DataStructures.dequeue!(B)) + push!(dists, peek(B)[2]) + push!(A, dequeue!(B)) end return YenState{T,U}(dists, A) diff --git a/src/spanningtrees/kruskal.jl b/src/spanningtrees/kruskal.jl index 2fb01bfff..b01fce811 100644 --- a/src/spanningtrees/kruskal.jl +++ b/src/spanningtrees/kruskal.jl @@ -5,12 +5,10 @@ distance matrix `distmx` using [Kruskal's algorithm](https://en.wikipedia.org/wi """ function kruskal_mst end # see https://github.com/mauro3/SimpleTraits.jl/issues/47#issuecomment-327880153 for syntax -@traitfn function kruskal_mst( - g::AG::(!IsDirected), - distmx::AbstractMatrix{T} = weights(g) -) where {T<:Real, U, AG<:AbstractGraph{U}} +@traitfn function kruskal_mst(g::AG::(!IsDirected), + distmx::AbstractMatrix{T}=weights(g)) where {T <: Real, U, AG <: AbstractGraph{U}} - connected_vs = DataStructures.IntDisjointSets(nv(g)) + connected_vs = IntDisjointSets(nv(g)) mst = Vector{Edge}() sizehint!(mst, nv(g) - 1) @@ -23,8 +21,8 @@ function kruskal_mst end end for e in edge_list[sortperm(weights)] - if !DataStructures.in_same_set(connected_vs, e.src, e.dst) - DataStructures.union!(connected_vs, e.src, e.dst) + if !in_same_set(connected_vs, e.src, e.dst) + union!(connected_vs, e.src, e.dst) push!(mst, e) (length(mst) >= nv(g) - 1) && break end diff --git a/src/spanningtrees/prim.jl b/src/spanningtrees/prim.jl index cacab2784..4a88d05f3 100644 --- a/src/spanningtrees/prim.jl +++ b/src/spanningtrees/prim.jl @@ -1,4 +1,4 @@ -struct PrimHeapEntry{T<:Real} +struct PrimHeapEntry{T <: Real} edge::Edge dist::T end @@ -13,10 +13,8 @@ distance matrix `distmx` using [Prim's algorithm](https://en.wikipedia.org/wiki/ Return a vector of edges. """ function prim_mst end -@traitfn function prim_mst( - g::AG::(!IsDirected), - distmx::AbstractMatrix{T} = weights(g) - ) where {T<:Real, U, AG<:AbstractGraph{U}} +@traitfn function prim_mst(g::AG::(!IsDirected), + distmx::AbstractMatrix{T}=weights(g)) where {T <: Real, U, AG <: AbstractGraph{U}} pq = Vector{PrimHeapEntry{T}}() mst = Vector{Edge}() marked = zeros(Bool, nv(g)) @@ -26,7 +24,7 @@ function prim_mst end visit!(g, 1, marked, pq, distmx) while !isempty(pq) - heap_entry = DataStructures.heappop!(pq) + heap_entry = heappop!(pq) v = src(heap_entry.edge) w = dst(heap_entry.edge) @@ -46,19 +44,17 @@ end Mark the vertex `v` of graph `g` true in the array `marked` and enter all its edges into priority queue `pq` with its `distmx` values as a PrimHeapEntry. 
""" -function visit!( - g::AbstractGraph, +function visit!(g::AbstractGraph, v::Integer, marked::AbstractVector{Bool}, pq::AbstractVector, - distmx::AbstractMatrix -) + distmx::AbstractMatrix) marked[v] = true for w in outneighbors(g, v) if !marked[w] x = min(v, w) y = max(v, w) - DataStructures.heappush!(pq, PrimHeapEntry(Edge(x, y), distmx[x, y])) + heappush!(pq, PrimHeapEntry(Edge(x, y), distmx[x, y])) end end end diff --git a/src/traversals/diffusion.jl b/src/traversals/diffusion.jl index de7684daf..10d9cb65b 100644 --- a/src/traversals/diffusion.jl +++ b/src/traversals/diffusion.jl @@ -17,12 +17,12 @@ from a vertex ``i`` to each of the `outneighbors` of ``i`` to ``\\frac{p}{outdegreee(g, i)}``. """ function diffusion(g::AbstractGraph{T}, - p::Real, - n::Integer; - watch::AbstractVector=Vector{Int}(), - initial_infections::AbstractVector=LightGraphs.sample(vertices(g), 1), - normalize::Bool=false - ) where T + p::Real, + n::Integer; + watch::AbstractVector=Vector{Int}(), + initial_infections::AbstractVector=LightGraphs.sample(vertices(g), 1), + normalize::Bool=false + ) where T # Initialize watch_set = Set{T}(watch) @@ -54,18 +54,18 @@ function diffusion(g::AbstractGraph{T}, local_p = p end - Random.randsubseq!(randsubseq_buf, outn, local_p) + randsubseq!(randsubseq_buf, outn, local_p) union!(new_infections, randsubseq_buf) end end # Record only new infections setdiff!(new_infections, infected_vertices) - if !isempty(watch_set) - vertices_per_step[step] = T.(collect(intersect(new_infections, watch_set))) - else - vertices_per_step[step] = collect(new_infections) - end + if !isempty(watch_set) + vertices_per_step[step] = T.(collect(intersect(new_infections, watch_set))) + else + vertices_per_step[step] = collect(new_infections) + end # Add new to master set of infected union!(infected_vertices, new_infections) @@ -88,9 +88,6 @@ diffusion_rate(g::AbstractGraph, p::Real, n::Integer; initial_infections::AbstractVector=LightGraphs.sample(vertices(g), 1), watch::AbstractVector=Vector{Int}(), normalize::Bool=false - ) = diffusion_rate( - diffusion(g, p, n, - initial_infections=initial_infections, - watch=watch, normalize=normalize - ) -) + ) = diffusion_rate(diffusion(g, p, n, + initial_infections=initial_infections, + watch=watch, normalize=normalize)) diff --git a/src/traversals/greedy_color.jl b/src/traversals/greedy_color.jl index 9935dd0ba..8c6491dae 100644 --- a/src/traversals/greedy_color.jl +++ b/src/traversals/greedy_color.jl @@ -3,7 +3,7 @@ Store number of colors used and mapping from vertex to color """ -struct coloring{T<:Integer} <: Any +struct coloring{T <: Integer} <: Any num_colors::T colors::Vector{T} end @@ -16,11 +16,7 @@ best_color(c1::coloring, c2::coloring) = c1.num_colors < c2.num_colors ? c1 : c2 Color graph `g` according to an order specified by `seq` using a greedy heuristic. seq[i] = v imples that vertex v is the ith vertex to be colored. """ -function perm_greedy_color( - g::AbstractGraph, - seq::Vector{T} - ) where T <: Integer - +function perm_greedy_color(g::AbstractGraph, seq::Vector{T}) where T <: Integer nvg::T = nv(g) cols = Vector{T}(undef, nvg) seen = zeros(Bool, nvg + 1) @@ -52,8 +48,8 @@ end Color graph `g` iteratively in the descending order of the degree of the vertices. 
""" -function degree_greedy_color(g::AbstractGraph{T}) where T<:Integer - seq = convert(Vector{T}, sortperm(degree(g) , rev=true)) +function degree_greedy_color(g::AbstractGraph{T}) where T <: Integer + seq = convert(Vector{T}, sortperm(degree(g), rev=true)) return perm_greedy_color(g, seq) end @@ -63,17 +59,15 @@ end Color graph `g` iteratively in a random order using a greedy heuristic and choose the best coloring out of `reps` number of colorings computed in parallel. """ -function parallel_random_greedy_color( - g::AbstractGraph{T}, - reps::Integer -) where T<:Integer +function parallel_random_greedy_color(g::AbstractGraph{T}, + reps::Integer) where T <: Integer - best = Distributed.@distributed (best_color) for i in 1:reps - seq = Random.shuffle(vertices(g)) + best = @distributed (best_color) for i in 1:reps + seq = shuffle(vertices(g)) perm_greedy_color(g, seq) end - return convert(coloring{T} ,best) + return convert(coloring{T}, best) end """ @@ -82,16 +76,14 @@ end Color graph `g` iteratively in a random order using a greedy heuristic and choose the best coloring out of `reps` such random coloring. """ -function seq_random_greedy_color( - g::AbstractGraph{T}, - reps::Integer -) where T <: Integer +function seq_random_greedy_color(g::AbstractGraph{T}, + reps::Integer) where T <: Integer - seq = Random.shuffle(vertices(g)) + seq = shuffle(vertices(g)) best = perm_greedy_color(g, seq) for i in 2:reps - Random.shuffle!(seq) + shuffle!(seq) best = best_color(best, perm_greedy_color(g, seq)) end return best @@ -105,7 +97,7 @@ and choose the best coloring out of `reps` such random coloring. If parallel is true then the colorings are executed in parallel. """ -random_greedy_color(g::AbstractGraph{T}, reps::Integer = 1, parallel::Bool = false) where {T<:Integer} = +random_greedy_color(g::AbstractGraph{T}, reps::Integer=1, parallel::Bool=false) where {T <: Integer} = parallel ? parallel_random_greedy_color(g, reps) : seq_random_greedy_color(g, reps) """ @@ -124,6 +116,6 @@ colors is chosen. If `parallel` is true then this function executes coloring in parallel. """ -greedy_color(g::AbstractGraph{U}; sort_degree::Bool=false, parallel::Bool =false, reps::Integer=1) where {U <: Integer} = +greedy_color(g::AbstractGraph{U}; sort_degree::Bool=false, parallel::Bool=false, reps::Integer=1) where {U <: Integer} = sort_degree ? degree_greedy_color(g) : random_greedy_color(g, reps, parallel) diff --git a/src/traversals/maxadjvisit.jl b/src/traversals/maxadjvisit.jl index 90025f5f0..ed22a6fa8 100644 --- a/src/traversals/maxadjvisit.jl +++ b/src/traversals/maxadjvisit.jl @@ -16,10 +16,8 @@ values that determines the partition in `g` (1 or 2) and `bestcut` is the weight of the cut that makes this partition. An optional `distmx` matrix may be specified; if omitted, edge distances are assumed to be 1. 
""" -function mincut( - g::AbstractGraph, - distmx::AbstractMatrix{T}=weights(g) -) where T <: Real +function mincut(g::AbstractGraph, + distmx::AbstractMatrix{T}=weights(g)) where T <: Real U = eltype(g) colormap = zeros(UInt8, nv(g)) ## 0 if unseen, 1 if processing and 2 if seen and closed @@ -27,7 +25,7 @@ function mincut( bestweight = typemax(T) cutweight = zero(T) visited = zero(U) ## number of vertices visited - pq = DataStructures.PriorityQueue{U, T}(Base.Order.Reverse) + pq = PriorityQueue{U,T}(Base.Order.Reverse) # Set number of visited neighbors for all vertices to 0 for v in vertices(g) @@ -42,7 +40,7 @@ function mincut( pq[one(U)] = one(T) while !isempty(pq) - u = DataStructures.dequeue!(pq) + u = dequeue!(pq) colormap[u] = 1 for v in outneighbors(g, u) @@ -81,15 +79,13 @@ be 1. If `log` (default `false`) is `true`, visitor events will be printed to `io`, which defaults to `STDOUT`; otherwise, no event information will be displayed. """ -function maximum_adjacency_visit( - g::AbstractGraph, +function maximum_adjacency_visit(g::AbstractGraph, distmx::AbstractMatrix{T}, log::Bool=false, - io::IO=stdout -) where T<:Real + io::IO=stdout) where T <: Real U = eltype(g) - pq = DataStructures.PriorityQueue{U, T}(Base.Order.Reverse) + pq = PriorityQueue{U,T}(Base.Order.Reverse) vertices_order = Vector{U}() has_key = ones(Bool, nv(g)) sizehint!(vertices_order, nv(g)) @@ -107,7 +103,7 @@ function maximum_adjacency_visit( #start traversing the graph while !isempty(pq) - u = DataStructures.dequeue!(pq) + u = dequeue!(pq) has_key[u] = false push!(vertices_order, u) log && println(io, "discover vertex: $u") @@ -123,9 +119,7 @@ function maximum_adjacency_visit( return vertices_order end -maximum_adjacency_visit(g::AbstractGraph) = maximum_adjacency_visit( - g, +maximum_adjacency_visit(g::AbstractGraph) = maximum_adjacency_visit(g, weights(g), false, - stdout -) + stdout) diff --git a/src/utils.jl b/src/utils.jl index fa0e33f05..a86ff2242 100644 --- a/src/utils.jl +++ b/src/utils.jl @@ -9,7 +9,7 @@ Sample `k` element from array `a` without repetition and eventually excluding el ### Implementation Notes Changes the order of the elements in `a`. For a non-mutating version, see [`sample`](@ref). """ -function sample!(rng::Random.AbstractRNG, a::AbstractVector, k::Integer; exclude = ()) +function sample!(rng::AbstractRNG, a::AbstractVector, k::Integer; exclude=()) minsize = k + length(exclude) length(a) < minsize && throw(ArgumentError("vector must be at least size $minsize")) res = Vector{eltype(a)}() @@ -27,7 +27,7 @@ function sample!(rng::Random.AbstractRNG, a::AbstractVector, k::Integer; exclude res end -sample!(a::AbstractVector, k::Integer; exclude = ()) = sample!(getRNG(), a, k; exclude = exclude) +sample!(a::AbstractVector, k::Integer; exclude=()) = sample!(getRNG(), a, k; exclude=exclude) """ sample([rng,] r, k) @@ -40,9 +40,9 @@ Sample `k` element from unit range `r` without repetition and eventually excludi ### Implementation Notes Unlike [`sample!`](@ref), does not produce side effects. """ -sample(a::UnitRange, k::Integer; exclude = ()) = sample!(getRNG(), collect(a), k; exclude = exclude) +sample(a::UnitRange, k::Integer; exclude=()) = sample!(getRNG(), collect(a), k; exclude=exclude) -getRNG(seed::Integer = -1) = seed >= 0 ? Random.MersenneTwister(seed) : Random.GLOBAL_RNG +getRNG(seed::Integer=-1) = seed >= 0 ? 
MersenneTwister(seed) : GLOBAL_RNG """ insorted(item, collection) diff --git a/test/biconnectivity/articulation.jl b/test/biconnectivity/articulation.jl index 3df06873b..d5cae2062 100644 --- a/test/biconnectivity/articulation.jl +++ b/test/biconnectivity/articulation.jl @@ -31,7 +31,7 @@ end end - hint = SparseArrays.blockdiag(WheelGraph(5), WheelGraph(5)) + hint = blockdiag(WheelGraph(5), WheelGraph(5)) add_edge!(hint, 5, 6) for h in (hint, Graph{UInt8}(hint), Graph{Int16}(hint)) @test @inferred(articulation(h)) == [5, 6] diff --git a/test/biconnectivity/biconnect.jl b/test/biconnectivity/biconnect.jl index adda7bdc7..927f54549 100644 --- a/test/biconnectivity/biconnect.jl +++ b/test/biconnectivity/biconnect.jl @@ -38,7 +38,7 @@ add_edge!(h, 3, 4) add_edge!(h, 1, 4) - gint = SparseArrays.blockdiag(g, h) + gint = blockdiag(g, h) add_edge!(gint, 4, 5) a = [[Edge(5, 8), Edge(7, 8), Edge(6, 7), Edge(5, 6)], [Edge(4, 5)], [Edge(1, 4), Edge(3, 4), Edge(2, 3), Edge(1, 2)]] diff --git a/test/centrality/pagerank.jl b/test/centrality/pagerank.jl index 9e4490994..48d97226e 100644 --- a/test/centrality/pagerank.jl +++ b/test/centrality/pagerank.jl @@ -1,28 +1,28 @@ @testset "Pagerank" begin function dense_pagerank_solver(g::AbstractGraph, α=0.85::Real) # M = google_matrix(g, α) - p = fill(1/nv(g), nv(g)) + p = fill(1 / nv(g), nv(g)) danglingnodes = outdegree(g) .== 0 M = Matrix{Float64}(adjacency_matrix(g)) M = M' M[:, danglingnodes] .= sum(danglingnodes) ./ nv(g) - M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:]) - @assert all(1.01 .>= sum(M, dims=1).>=0.999) + M = M * Diagonal(1 ./ sum(M, dims=1)[:]) + @assert all(1.01 .>= sum(M, dims=1) .>= 0.999) # v = inv(I-β*M) * ((1-β)/nv(g) * ones(nv(g), 1)) - v = inv(I-α*M) * ((1-α)/nv(g) * ones(nv(g), 1)) + v = inv(I - α * M) * ((1 - α) / nv(g) * ones(nv(g), 1)) return v end function google_matrix(g::AbstractGraph, α=0.85::Real) - p = fill(1/nv(g), nv(g)) + p = fill(1 / nv(g), nv(g)) danglingnodes = outdegree(g) .== 0 M = Matrix{Float64}(adjacency_matrix(g)) @show M = M' M[:, danglingnodes] = sum(danglingnodes) ./ nv(g) - @show M = M * LinearAlgebra.Diagonal(1 ./ sum(M, dims=1)[:]) - @show sum(M,1) - @assert all(1.01 .>= sum(M, 1).>=0.999) - return α*M .+ (1-α)*p + @show M = M * Diagonal(1 ./ sum(M, dims=1)[:]) + @show sum(M, 1) + @assert all(1.01 .>= sum(M, 1) .>= 0.999) + return α * M .+ (1 - α) * p end g5 = SimpleDiGraph(4) @@ -30,19 +30,19 @@ g6 = SimpleGraph(4) add_edge!(g6, 1, 2); add_edge!(g6, 2, 3); add_edge!(g6, 1, 3); add_edge!(g6, 3, 4) for α in [0.75, 0.85] - for g in testdigraphs(g5) - @test pagerank(g)[3] ≈ 0.318 atol = 0.001 - @test length(@inferred(pagerank(g))) == nv(g) - @test_throws ErrorException pagerank(g, 2) - @test_throws ErrorException pagerank(g, α, 2) - @test isapprox(pagerank(g, α), dense_pagerank_solver(g, α), atol=0.001) - end + for g in testdigraphs(g5) + @test pagerank(g)[3] ≈ 0.318 atol = 0.001 + @test length(@inferred(pagerank(g))) == nv(g) + @test_throws ErrorException pagerank(g, 2) + @test_throws ErrorException pagerank(g, α, 2) + @test isapprox(pagerank(g, α), dense_pagerank_solver(g, α), atol=0.001) + end - for g in testgraphs(g6) - @test length(@inferred(pagerank(g))) == nv(g) - @test_throws ErrorException pagerank(g, 2) - @test_throws ErrorException pagerank(g, α, 2) - @test isapprox(pagerank(g, α), dense_pagerank_solver(g, α), atol = 0.001) - end + for g in testgraphs(g6) + @test length(@inferred(pagerank(g))) == nv(g) + @test_throws ErrorException pagerank(g, 2) + @test_throws ErrorException pagerank(g, α, 2) 
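            # Why dense_pagerank_solver above is a valid oracle (sketch): pagerank
            # is the fixed point v = α*M*v + ((1 - α)/nv(g)) * ones(nv(g)), so
            # v = inv(I - α*M) * ((1 - α)/nv(g) * ones(nv(g), 1)), which is exactly
            # what the dense solver computes.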
+ @test isapprox(pagerank(g, α), dense_pagerank_solver(g, α), atol=0.001) + end end end diff --git a/test/community/core-periphery.jl b/test/community/core-periphery.jl index 570fbe9d0..a9f08d290 100644 --- a/test/community/core-periphery.jl +++ b/test/community/core-periphery.jl @@ -10,7 +10,7 @@ end g10 = StarGraph(10) - g10 = SparseArrays.blockdiag(g10, g10) + g10 = blockdiag(g10, g10) add_edge!(g10, 1, 11) for g in testgraphs(g10) c = @inferred(core_periphery_deg(g)) diff --git a/test/community/label_propagation.jl b/test/community/label_propagation.jl index 037c0f2cb..c96fbc6ba 100644 --- a/test/community/label_propagation.jl +++ b/test/community/label_propagation.jl @@ -4,7 +4,7 @@ for g in testgraphs(g10) z = copy(g) for k = 2:5 - z = SparseArrays.blockdiag(z, g) + z = blockdiag(z, g) add_edge!(z, (k - 1) * n, k * n) c, ch = @inferred(label_propagation(z)) a = collect(n:n:(k * n)) diff --git a/test/generators/binomial.jl b/test/generators/binomial.jl index f7cf13a24..71381dd43 100644 --- a/test/generators/binomial.jl +++ b/test/generators/binomial.jl @@ -12,8 +12,7 @@ import LightGraphs: randbn import StatsBase: SummaryStats function -(s::SummaryStats, t::SummaryStats) - return SummaryStats( - s.mean - t.mean, + return SummaryStats(s.mean - t.mean, s.min - t.min, s.q25 - t.q25, s.median - t.median, @@ -38,7 +37,7 @@ function binomial_test(n, p, s) @show dσ - lσ @test abs(dσ - lσ) / dσ < .10 end -Random.srand(1234) +srand(1234) n = 10000 p = 0.3 s = 100000 diff --git a/test/generators/randgraphs.jl b/test/generators/randgraphs.jl index e3b7d1497..5329f730c 100644 --- a/test/generators/randgraphs.jl +++ b/test/generators/randgraphs.jl @@ -256,7 +256,7 @@ bp = blockfractions(sbm, g) ./ (sizes * sizes') ratios = bp ./ (sbm.affinities ./ sum(sbm.affinities)) test_sbm(sbm, bp) - @test LinearAlgebra.norm(collect(ratios)) < 0.25 + @test norm(collect(ratios)) < 0.25 sizes = [200, 200, 100] internaldeg = 15 @@ -274,14 +274,14 @@ bp = blockfractions(sbm, g) ./ (sizes * sizes') test_sbm(sbm, bp) ratios = bp ./ (sbm.affinities ./ sum(sbm.affinities)) - @test LinearAlgebra.norm(collect(ratios)) < 0.25 + @test norm(collect(ratios)) < 0.25 # check that average degree is not too high # factor of two is cushion for random process @test mean(degree(g)) <= 4 // 2 * numedges / sum(sizes) # check that the internal degrees are higher than the external degrees # 5//4 is cushion for random process. 
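    # (the column sums of bc with its diagonal removed are the external block
    # counts; the test below requires them to stay within 5//4 of the internal
    # counts on the diagonal)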
- @test all(sum(bc - LinearAlgebra.diagm(0 => SparseArrays.diag(bc)), dims=1) .<= 5 // 4 .* SparseArrays.diag(bc)) + @test all(sum(bc - diagm(0 => diag(bc)), dims=1) .<= 5 // 4 .* diag(bc)) sbm2 = StochasticBlockModel(0.5 * ones(4), 0.3, 10 * ones(Int, 4)) diff --git a/test/generators/staticgraphs.jl b/test/generators/staticgraphs.jl index 79273bd36..d8803d81e 100644 --- a/test/generators/staticgraphs.jl +++ b/test/generators/staticgraphs.jl @@ -60,8 +60,8 @@ I = [1, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 7, 8, 8, 9, 9] J = [2, 3, 4, 1, 5, 1, 6, 1, 5, 6, 7, 2, 4, 8, 3, 4, 9, 4, 8, 9, 5, 7, 6, 7] V = ones(Int, length(I)) - Adj = SparseArrays.sparse(I, J, V) - @test Adj == SparseArrays.sparse(g) + Adj = sparse(I, J, V) + @test Adj == sparse(g) g = @inferred(DoubleBinaryTree(3)) # [[3, 2, 8] @@ -81,8 +81,8 @@ I = [1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 5, 6, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 12, 13, 14] J = [3, 2, 8, 4, 1, 5, 1, 6, 7, 2, 2, 3, 3, 10, 9, 1, 11, 8, 12, 8, 13, 14, 9, 9, 10, 10] V = ones(Int, length(I)) - Adj = SparseArrays.sparse(I, J, V) - @test Adj == SparseArrays.sparse(g) + Adj = sparse(I, J, V) + @test Adj == sparse(g) rg3 = @inferred(RoachGraph(3)) # [3] @@ -100,6 +100,6 @@ I = [1, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 7, 8, 8, 8, 9, 9, 9, 10, 10, 10, 11, 11, 12, 12] J = [3, 4, 1, 5, 2, 6, 3, 7, 4, 8, 9, 8, 5, 10, 7, 6, 11, 10, 7, 8, 9, 12, 9, 12, 10, 11] V = ones(Int, length(I)) - Adj = SparseArrays.sparse(I, J, V) - @test Adj == SparseArrays.sparse(rg3) + Adj = sparse(I, J, V) + @test Adj == sparse(rg3) end diff --git a/test/graphcut/normalized_cut.jl b/test/graphcut/normalized_cut.jl index 6eaeedd82..25701effb 100644 --- a/test/graphcut/normalized_cut.jl +++ b/test/graphcut/normalized_cut.jl @@ -33,7 +33,7 @@ @test labels == [1, 1, 1, 2, 2, 2] || labels == [2, 2, 2, 1, 1, 1] end - w = SparseArrays.SparseMatrixCSC(w) + w = SparseMatrixCSC(w) for g in testgraphs(gx) labels = @inferred(normalized_cut(g, 1, w)) @test labels == [1, 1, 1, 2, 2, 2] || labels == [2, 2, 2, 1, 1, 1] @@ -53,7 +53,7 @@ @test labels == [1, 1, 2, 2] || labels == [2, 2, 1, 1] end - w = SparseArrays.SparseMatrixCSC(w) + w = SparseMatrixCSC(w) for g in testgraphs(gx) labels = @inferred(normalized_cut(g, 0.1, w)) @test labels == [1, 1, 2, 2] || labels == [2, 2, 1, 1] diff --git a/test/linalg/graphmatrices.jl b/test/linalg/graphmatrices.jl index 8da8a955b..019fc498d 100644 --- a/test/linalg/graphmatrices.jl +++ b/test/linalg/graphmatrices.jl @@ -17,10 +17,10 @@ adjmat, stochmat, adjhat, avgmat = constructors(mat) @test adjmat.D == vec(sum(mat, dims=1)) @test adjmat.A == mat - @test isa(SparseArrays.sparse(mat), SparseArrays.SparseMatrixCSC) - @test isa(SparseArrays.sparse(stochmat), SparseArrays.SparseMatrixCSC) - @test isa(SparseArrays.sparse(adjhat), SparseArrays.SparseMatrixCSC) - @test isa(SparseArrays.sparse(avgmat), SparseArrays.SparseMatrixCSC) + @test isa(sparse(mat), SparseMatrixCSC) + @test isa(sparse(stochmat), SparseMatrixCSC) + @test isa(sparse(adjhat), SparseMatrixCSC) + @test isa(sparse(avgmat), SparseMatrixCSC) @test isa(convert(CombinatorialAdjacency, adjmat), CombinatorialAdjacency) @test isa(convert(CombinatorialAdjacency, avgmat), CombinatorialAdjacency) @test prescalefactor(adjhat) == postscalefactor(adjhat) @@ -60,7 +60,7 @@ @test_throws MethodError AveragingLaplacian(lapl) @test_throws MethodError convert(CombinatorialAdjacency, lapl) - L = SparseArrays.sparse(lapl) + L = sparse(lapl) @test sum(abs, (sum(L, dims=1))) == 0 end @@ -88,19 +88,19 @@ @test sum(abs, (adjmat * onevec)) 
> 0.0 @test sum(abs, ((stochmat * onevec) / sum(onevec))) ≈ 1.0 @test sum(abs, (lapl * onevec)) == 0 - g(a) = sum(abs, (sum(SparseArrays.sparse(a), dims=1))) + g(a) = sum(abs, (sum(sparse(a), dims=1))) @test g(lapl) == 0 @test g(NormalizedLaplacian(adjhat)) > 1e-13 @test g(StochasticLaplacian(stochmat)) > 1e-13 - @test IterativeEigensolvers.eigs(adjmat, which=:LR)[1][1] > 1.0 - @test IterativeEigensolvers.eigs(stochmat, which=:LR)[1][1] ≈ 1.0 - @test IterativeEigensolvers.eigs(avgmat, which=:LR)[1][1] ≈ 1.0 - @test IterativeEigensolvers.eigs(lapl, which=:LR)[1][1] > 2.0 - @test_throws MethodError IterativeEigensolvers.eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0) + @test eigs(adjmat, which=:LR)[1][1] > 1.0 + @test eigs(stochmat, which=:LR)[1][1] ≈ 1.0 + @test eigs(avgmat, which=:LR)[1][1] ≈ 1.0 + @test eigs(lapl, which=:LR)[1][1] > 2.0 + @test_throws MethodError eigs(lapl, which=:SM)[1][1] # --> greater_than(-0.0) lhat = NormalizedLaplacian(adjhat) - @test IterativeEigensolvers.eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9 + @test eigs(lhat, which=:LR)[1][1] < 2.0 + 1e-9 end function test_other(mat, n) @@ -112,20 +112,20 @@ @test_throws MethodError symmetrize(StochasticAdjacency(adjmat)) @test_throws MethodError symmetrize(AveragingAdjacency(adjmat)) - @test !LinearAlgebra.issymmetric(AveragingAdjacency(adjmat)) - @test !LinearAlgebra.issymmetric(StochasticAdjacency(adjmat)) + @test !issymmetric(AveragingAdjacency(adjmat)) + @test !issymmetric(StochasticAdjacency(adjmat)) @test_throws MethodError symmetrize(NormalizedAdjacency(adjmat)).A # --> adjmat.A begin @test CombinatorialAdjacency(mat) == CombinatorialAdjacency(mat) S = StochasticAdjacency(CombinatorialAdjacency(mat)) @test S.A == S.A - @test SparseArrays.sparse(S) != S.A + @test sparse(S) != S.A @test adjacency(S) == S.A @test NormalizedAdjacency(adjmat) != adjmat @test StochasticLaplacian(S) != adjmat @test_throws MethodError StochasticLaplacian(adjmat) # --> not(adjmat) - @test !LinearAlgebra.issymmetric(S) + @test !issymmetric(S) end end @@ -141,8 +141,8 @@ @test_throws MethodError symmetrize(NormalizedAdjacency(adjmat)).A # --> adjmat.A @test symmetrize(adjmat).A == adjmat.A # these tests are basically the code - @test symmetrize(adjmat, :triu).A == LinearAlgebra.triu(adjmat.A) + LinearAlgebra.triu(adjmat.A)' - @test symmetrize(adjmat, :tril).A == LinearAlgebra.tril(adjmat.A) + LinearAlgebra.tril(adjmat.A)' + @test symmetrize(adjmat, :triu).A == triu(adjmat.A) + triu(adjmat.A)' + @test symmetrize(adjmat, :tril).A == tril(adjmat.A) + tril(adjmat.A)' @test symmetrize(adjmat, :sum).A == adjmat.A + adjmat.A @test_throws ArgumentError symmetrize(adjmat, :fake) @@ -152,16 +152,16 @@ adjmat = CombinatorialAdjacency(mat) ahatp = PunchedAdjacency(adjmat) y = ahatp * perron(ahatp) - @test LinearAlgebra.dot(y, ahatp.perron) ≈ 0.0 atol = 1.0e-8 + @test dot(y, ahatp.perron) ≈ 0.0 atol = 1.0e-8 @test sum(abs, y) ≈ 0.0 atol = 1.0e-8 - eval, evecs = IterativeEigensolvers.eigs(ahatp, which=:LM) + eval, evecs = eigs(ahatp, which=:LM) @test eval[1] - (1 + 1.0e-8) <= 0 - @test LinearAlgebra.dot(perron(ahatp), evecs[:, 1]) ≈ 0.0 atol = 1e-8 + @test dot(perron(ahatp), evecs[:, 1]) ≈ 0.0 atol = 1e-8 ahat = ahatp.A @test isa(ahat, NormalizedAdjacency) z = ahatp * perron(ahat) - @test LinearAlgebra.norm(z) ≈ 0.0 atol = 1e-8 + @test norm(z) ≈ 0.0 atol = 1e-8 end @@ -183,7 +183,7 @@ """Computes the stationary distribution of a random walk""" function stationarydistribution(R::StochasticAdjacency; kwargs...) 
- er = IterativeEigensolvers.eigs(R, nev=1, which=:LR; kwargs...) + er = eigs(R, nev=1, which=:LR; kwargs...) l1 = er[1][1] abs(l1 - 1) < 1e-8 || error("failed to compute stationary distribution") # TODO 0.7: should we change the error type to InexactError? p = real(er[2][:, 1]) diff --git a/test/linalg/runtests.jl b/test/linalg/runtests.jl index 6a2c32ae5..3bf1906a5 100644 --- a/test/linalg/runtests.jl +++ b/test/linalg/runtests.jl @@ -1,6 +1,8 @@ using LightGraphs.LinAlg using IterativeEigensolvers - +using Random +using SparseArrays +using LinearAlgebra const linalgtestdir = dirname(@__FILE__) tests = [ diff --git a/test/linalg/spectral.jl b/test/linalg/spectral.jl index eaabb809a..b3a44d1e3 100644 --- a/test/linalg/spectral.jl +++ b/test/linalg/spectral.jl @@ -1,7 +1,7 @@ import Base: Matrix # just so that we can assert equality of matrices -Matrix(nbt::Nonbacktracking) = Matrix(SparseArrays.sparse(nbt)) +Matrix(nbt::Nonbacktracking) = Matrix(sparse(nbt)) @testset "Spectral" begin @@ -48,14 +48,14 @@ Matrix(nbt::Nonbacktracking) = Matrix(SparseArrays.sparse(nbt)) @test sum(B[:, i]) == 8 @test sum(B[i, :]) == 8 end - @test !LinearAlgebra.issymmetric(B) + @test !issymmetric(B) v = ones(Float64, ne(g)) z = zeros(Float64, nv(g)) n10 = Nonbacktracking(g) @test size(n10) == (2 * ne(g), 2 * ne(g)) @test eltype(n10) == Float64 - @test !LinearAlgebra.issymmetric(n10) + @test !issymmetric(n10) contract!(z, n10, v) @@ -93,9 +93,9 @@ Matrix(nbt::Nonbacktracking) = Matrix(SparseArrays.sparse(nbt)) T = eltype(g) amat = adjacency_matrix(g, Float64; dir=dir) lmat = laplacian_matrix(g, Float64; dir=dir) - @test isa(amat, SparseArrays.SparseMatrixCSC{Float64,T}) - @test isa(lmat, SparseArrays.SparseMatrixCSC{Float64,T}) - evals = LinearAlgebra.eigvals(Matrix(lmat)) + @test isa(amat, SparseMatrixCSC{Float64,T}) + @test isa(lmat, SparseMatrixCSC{Float64,T}) + evals = eigvals(Matrix(lmat)) @test all(evals .>= -1e-15) # positive semidefinite @test (minimum(evals)) ≈ 0 atol = 1e-13 end @@ -131,22 +131,22 @@ Matrix(nbt::Nonbacktracking) = Matrix(SparseArrays.sparse(nbt)) for g in testgraphs(pg) nbt = Nonbacktracking(g) B, emap = non_backtracking_matrix(g) - Bs = SparseArrays.sparse(nbt) - @test SparseArrays.sparse(B) == Bs - @test IterativeEigensolvers.eigs(nbt, nev=1)[1] ≈ IterativeEigensolvers.eigs(B, nev=1)[1] atol = 1e-5 + Bs = sparse(nbt) + @test sparse(B) == Bs + @test eigs(nbt, nev=1)[1] ≈ eigs(B, nev=1)[1] atol = 1e-5 # check that matvec works x = ones(Float64, nbt.m) y = nbt * x z = B * x - @test LinearAlgebra.norm(y - z) < 1e-8 + @test norm(y - z) < 1e-8 #check that matmat works and Matrix(nbt) == B - @test LinearAlgebra.norm(nbt * Matrix{Float64}(LinearAlgebra.I, nbt.m, nbt.m) - B) < 1e-8 + @test norm(nbt * Matrix{Float64}(I, nbt.m, nbt.m) - B) < 1e-8 #check that matmat works and Matrix(nbt) == B - @test LinearAlgebra.norm(nbt * Matrix{Float64}(LinearAlgebra.I, nbt.m, nbt.m) - B) < 1e-8 + @test norm(nbt * Matrix{Float64}(I, nbt.m, nbt.m) - B) < 1e-8 #check that we can use the implicit matvec in nonbacktrack_embedding @test size(y) == size(x) @@ -156,7 +156,7 @@ Matrix(nbt::Nonbacktracking) = Matrix(SparseArrays.sparse(nbt)) @test Matrix(B₁) == Matrix(B) @test B₁ * ones(size(B₁)[2]) == B * ones(size(B)[2]) @test size(B₁) == size(B) - @test !LinearAlgebra.issymmetric(B₁) + @test !issymmetric(B₁) @test eltype(B₁) == Float64 end # END tests for Nonbacktracking diff --git a/test/operators.jl b/test/operators.jl index 168545520..86735788f 100644 --- a/test/operators.jl +++ b/test/operators.jl @@ -8,7 
+8,7 @@ @test nv(c) == 5 @test ne(c) == 6 - gb = @inferred(SparseArrays.blockdiag(g, g)) + gb = @inferred(blockdiag(g, g)) @test nv(gb) == 10 @test ne(gb) == 8 @@ -138,7 +138,7 @@ T = eltype(g) hc = CompleteGraph(2) h = Graph{T}(hc) - z = @inferred(SparseArrays.blockdiag(g, h)) + z = @inferred(blockdiag(g, h)) @test nv(z) == nv(g) + nv(h) @test ne(z) == ne(g) + ne(h) @test has_edge(z, 1, 2) @@ -173,10 +173,10 @@ @test size(p, 3) == 1 @test sum(p, 1) == sum(p, 2) @test_throws ArgumentError sum(p, 3) - @test SparseArrays.sparse(p) == adjacency_matrix(p) + @test sparse(p) == adjacency_matrix(p) @test length(p) == 100 @test ndims(p) == 2 - @test LinearAlgebra.issymmetric(p) + @test issymmetric(p) end gx = SimpleDiGraph(4) @@ -186,7 +186,7 @@ @test sum(g, 1) == [0, 1, 2, 1] @test sum(g, 2) == [2, 1, 1, 0] @test sum(g) == 4 - @test @inferred(!LinearAlgebra.issymmetric(g)) + @test @inferred(!issymmetric(g)) end nx = 20; ny = 21 @@ -204,7 +204,7 @@ m = nv(h) for i in 1:(len - 1) k = nv(g) - g = SparseArrays.blockdiag(g, h) + g = blockdiag(g, h) for v in 1:m add_edge!(g, v + (k - m), v + k) end diff --git a/test/runtests.jl b/test/runtests.jl index 29f74f113..f780f0321 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -5,6 +5,7 @@ using SparseArrays using LinearAlgebra using DelimitedFiles using Base64 +using Random const testdir = dirname(@__FILE__) diff --git a/test/shortestpaths/astar.jl b/test/shortestpaths/astar.jl index bc0c4ec08..88fb9550b 100644 --- a/test/shortestpaths/astar.jl +++ b/test/shortestpaths/astar.jl @@ -3,7 +3,7 @@ g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testgraphs(g3), dg in testdigraphs(g4) @test @inferred(a_star(g, 1, 4, d1)) == @inferred(a_star(dg, 1, 4, d1)) == diff --git a/test/shortestpaths/bellman-ford.jl b/test/shortestpaths/bellman-ford.jl index 1ab42dce2..cfd977156 100644 --- a/test/shortestpaths/bellman-ford.jl +++ b/test/shortestpaths/bellman-ford.jl @@ -2,7 +2,7 @@ g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testdigraphs(g4) y = @inferred(bellman_ford_shortest_paths(g, 2, d1)) z = @inferred(bellman_ford_shortest_paths(g, 2, d2)) diff --git a/test/shortestpaths/dijkstra.jl b/test/shortestpaths/dijkstra.jl index 754c02e41..ae34974c5 100644 --- a/test/shortestpaths/dijkstra.jl +++ b/test/shortestpaths/dijkstra.jl @@ -1,7 +1,7 @@ @testset "Dijkstra" begin g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testdigraphs(g4) y = @inferred(dijkstra_shortest_paths(g, 2, d1)) diff --git a/test/shortestpaths/johnson.jl b/test/shortestpaths/johnson.jl index 97a4d1281..a8301fab7 100644 --- a/test/shortestpaths/johnson.jl +++ b/test/shortestpaths/johnson.jl @@ -1,44 +1,43 @@ @testset "Johnson" begin g3 = PathGraph(5) - d = LinearAlgebra.Symmetric([0 1 2 3 4; 1 0 6 7 8; 2 6 0 11 12; 3 7 11 0 16; 4 8 12 16 0]) + d = 
Symmetric([0 1 2 3 4; 1 0 6 7 8; 2 6 0 11 12; 3 7 11 0 16; 4 8 12 16 0]) for g in testgraphs(g3) - z = @inferred(johnson_shortest_paths(g, d)) - @test z.dists[3, :][:] == [7, 6, 0, 11, 27] - @test z.parents[3, :][:] == [2, 3, 0, 3, 4] + z = @inferred(johnson_shortest_paths(g, d)) + @test z.dists[3, :][:] == [7, 6, 0, 11, 27] + @test z.parents[3, :][:] == [2, 3, 0, 3, 4] - @test @inferred(enumerate_paths(z))[2][2] == [] - @test @inferred(enumerate_paths(z))[2][4] == enumerate_paths(z, 2)[4] == enumerate_paths(z, 2, 4) == [2, 3, 4] + @test @inferred(enumerate_paths(z))[2][2] == [] + @test @inferred(enumerate_paths(z))[2][4] == enumerate_paths(z, 2)[4] == enumerate_paths(z, 2, 4) == [2, 3, 4] - z = @inferred(johnson_shortest_paths(g, d, parallel=true)) - @test z.dists[3, :][:] == [7, 6, 0, 11, 27] - @test z.parents[3, :][:] == [2, 3, 0, 3, 4] + z = @inferred(johnson_shortest_paths(g, d, parallel=true)) + @test z.dists[3, :][:] == [7, 6, 0, 11, 27] + @test z.parents[3, :][:] == [2, 3, 0, 3, 4] - @test @inferred(enumerate_paths(z))[2][2] == [] - @test @inferred(enumerate_paths(z))[2][4] == enumerate_paths(z, 2)[4] == enumerate_paths(z, 2, 4) == [2, 3, 4] + @test @inferred(enumerate_paths(z))[2][2] == [] + @test @inferred(enumerate_paths(z))[2][4] == enumerate_paths(z, 2)[4] == enumerate_paths(z, 2, 4) == [2, 3, 4] end g4 = PathDiGraph(4) for g in testdigraphs(g4) - z = @inferred(johnson_shortest_paths(g)) - @test length(enumerate_paths(z, 4, 3)) == 0 - @test length(enumerate_paths(z, 4, 1)) == 0 - @test length(enumerate_paths(z, 2, 3)) == 2 + z = @inferred(johnson_shortest_paths(g)) + @test length(enumerate_paths(z, 4, 3)) == 0 + @test length(enumerate_paths(z, 4, 1)) == 0 + @test length(enumerate_paths(z, 2, 3)) == 2 - z = @inferred(johnson_shortest_paths(g, parallel=true)) - @test length(enumerate_paths(z, 4, 3)) == 0 - @test length(enumerate_paths(z, 4, 1)) == 0 - @test length(enumerate_paths(z, 2, 3)) == 2 + z = @inferred(johnson_shortest_paths(g, parallel=true)) + @test length(enumerate_paths(z, 4, 3)) == 0 + @test length(enumerate_paths(z, 4, 1)) == 0 + @test length(enumerate_paths(z, 2, 3)) == 2 end g5 = DiGraph([1 1 1 0 1; 0 1 0 1 1; 0 1 1 0 0; 1 0 1 1 0; 0 0 0 1 1]) d = [0 3 8 0 -4; 0 0 0 1 7; 0 4 0 0 0; 2 0 -5 0 0; 0 0 0 6 0] for g in testdigraphs(g5) - z = @inferred(johnson_shortest_paths(g, d)) - @test z.dists == [0 1 -3 2 -4; 3 0 -4 1 -1; 7 4 0 5 3; 2 -1 -5 0 -2; 8 5 1 6 0] - - z = @inferred(johnson_shortest_paths(g, d, parallel=true)) - @test z.dists == [0 1 -3 2 -4; 3 0 -4 1 -1; 7 4 0 5 3; 2 -1 -5 0 -2; 8 5 1 6 0] - end + z = @inferred(johnson_shortest_paths(g, d)) + @test z.dists == [0 1 -3 2 -4; 3 0 -4 1 -1; 7 4 0 5 3; 2 -1 -5 0 -2; 8 5 1 6 0] + z = @inferred(johnson_shortest_paths(g, d, parallel=true)) + @test z.dists == [0 1 -3 2 -4; 3 0 -4 1 -1; 7 4 0 5 3; 2 -1 -5 0 -2; 8 5 1 6 0] + end end diff --git a/test/shortestpaths/yen.jl b/test/shortestpaths/yen.jl index 5baef9331..ebec4b95f 100644 --- a/test/shortestpaths/yen.jl +++ b/test/shortestpaths/yen.jl @@ -1,7 +1,7 @@ @testset "Yen" begin g4 = PathDiGraph(5) d1 = float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0]) - d2 = SparseArrays.sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) + d2 = sparse(float([0 1 2 3 4; 5 0 6 7 8; 9 10 0 11 12; 13 14 15 0 16; 17 18 19 20 0])) for g in testdigraphs(g4) x = @inferred(yen_k_shortest_paths(g, 5, 5)) diff --git a/test/simplegraphs/simplegraphs.jl b/test/simplegraphs/simplegraphs.jl index 73502af8b..2f4523c26 100644 --- 
a/test/simplegraphs/simplegraphs.jl +++ b/test/simplegraphs/simplegraphs.jl @@ -194,8 +194,8 @@ import Random # We create an edge list, shuffle it and reverse half of its edges # using this edge list should result in the same graph edge_list = [e for e in edges(g)] - Random.shuffle!(Random.MersenneTwister(0), edge_list) - for i in rand(Random.MersenneTwister(0), 1:length(edge_list), length(edge_list) ÷ 2) + shuffle!(MersenneTwister(0), edge_list) + for i in rand(MersenneTwister(0), 1:length(edge_list), length(edge_list) ÷ 2) e = edge_list[i] Te = typeof(e) edge_list[i] = Te(dst(e), src(e)) @@ -229,7 +229,7 @@ import Random for g in testdigraphs(g_dir) # We create an edge list and shuffle it edge_list = [e for e in edges(g)] - Random.shuffle!(Random.MersenneTwister(0), edge_list) + shuffle!(MersenneTwister(0), edge_list) edge_iter = (e for e in edge_list) edge_set = Set(edge_list) @@ -268,7 +268,7 @@ import Random g_undir = SimpleGraph(0) for g in testgraphs(g_undir) T = edgetype(g) - edge_list = T.([(4,4),(1,2),(4,4),(1,2),(4,4),(2,1),(0,1),(1,0),(0,0)]) + edge_list = T.([(4, 4),(1, 2),(4, 4),(1, 2),(4, 4),(2, 1),(0, 1),(1, 0),(0, 0)]) edge_iter = (e for e in edge_list) edge_set = Set(edge_list) edge_set_any = Set{Any}(edge_list) @@ -294,7 +294,7 @@ import Random g_dir = SimpleDiGraph(0) for g in testdigraphs(g_dir) T = edgetype(g) - edge_list = T.([(4,4),(1,2),(4,4),(1,2),(4,4),(2,1),(0,1),(1,0),(0,0)]) + edge_list = T.([(4, 4),(1, 2),(4, 4),(1, 2),(4, 4),(2, 1),(0, 1),(1, 0),(0, 0)]) edge_iter = (e for e in edge_list) edge_set = Set(edge_list) edge_set_any = Set{Any}(edge_list) @@ -322,8 +322,8 @@ import Random g_undir = SimpleGraph(0) for g in testgraphs(g_undir) T = edgetype(g) - edge_list_good = Any[ T.(1,2), T.(3,4) ] - edge_list_bad = Any[ T.(1,2), Int64(1) ] + edge_list_good = Any[ T.(1, 2), T.(3, 4) ] + edge_list_bad = Any[ T.(1, 2), Int64(1) ] g1 = SimpleGraphFromIterator(edge_list_good) @test edgetype(g1) == T @@ -332,8 +332,8 @@ import Random g_dir = SimpleDiGraph(0) for g in testdigraphs(g_dir) T = edgetype(g) - edge_list_good = Any[ T.(1,2), T.(3,4) ] - edge_list_bad = Any[ T.(1,2), Int64(1) ] + edge_list_good = Any[ T.(1, 2), T.(3, 4) ] + edge_list_bad = Any[ T.(1, 2), Int64(1) ] g1 = SimpleDiGraphFromIterator(edge_list_good) @test edgetype(g1) == T @@ -342,8 +342,8 @@ import Random # If there are edges of multiple types, they should be propagated # to a common supertype - edge_list_1 = Any[Edge{Int8}(1,2), Edge{Int16}(3,4)] - edge_list_2 = Any[Edge{Int16}(1,2), Edge{Int8}(3,4)] + edge_list_1 = Any[Edge{Int8}(1, 2), Edge{Int16}(3, 4)] + edge_list_2 = Any[Edge{Int16}(1, 2), Edge{Int8}(3, 4)] g1_undir = SimpleGraphFromIterator(edge_list_1) g2_undir = SimpleGraphFromIterator(edge_list_2) g1_dir = SimpleGraphFromIterator(edge_list_1) diff --git a/test/traversals/bipartition.jl b/test/traversals/bipartition.jl index 0efb06b70..eb2de13be 100644 --- a/test/traversals/bipartition.jl +++ b/test/traversals/bipartition.jl @@ -23,7 +23,7 @@ T = eltype(g) @test @inferred(bipartite_map(g)) == Vector{T}([ones(T, 10); 2 * ones(T, 10)]) - h = SparseArrays.blockdiag(g, g) + h = blockdiag(g, g) @test @inferred(bipartite_map(h)) == Vector{T}([ones(T, 10); 2 * ones(T, 10); ones(T, 10); 2 * ones(T, 10)]) end
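
For reference, the convention this patch applies throughout, as a standalone sketch (the `PriorityQueue` usage here is illustrative and not taken from the diff):

```julia
# Before: the whole module is imported, so every call site must be qualified.
import DataStructures
pq1 = DataStructures.PriorityQueue{Int,Float64}()

# After: only the names actually used are brought into scope, keeping call
# sites short and making the dependency surface explicit.
using DataStructures: PriorityQueue, enqueue!, dequeue!
pq2 = PriorityQueue{Int,Float64}()
enqueue!(pq2, 1, 0.5)   # key 1 with priority 0.5
dequeue!(pq2)           # returns 1, the lowest-priority key
```

Functions that extend methods owned by another module (such as the `blockdiag` and `issymmetric` definitions in `src/operators.jl` above) still rely on `import`ed names, since `using` alone does not permit adding methods.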