From d6580d338f0a59cf2651d15c44baa0f35efb2b43 Mon Sep 17 00:00:00 2001 From: Jutho Date: Wed, 11 Sep 2024 17:50:50 -0700 Subject: [PATCH] change tensor structure (part1) --- src/TensorKit.jl | 40 +- src/auxiliary/auxiliary.jl | 2 +- src/auxiliary/linalg.jl | 25 +- src/spaces/homspace.jl | 108 +++- src/spaces/productspace.jl | 28 +- src/tensors/abstracttensor.jl | 125 ++++- src/tensors/adjoint.jl | 92 +-- src/tensors/linalg.jl | 78 +-- src/tensors/tensor.jl | 623 ++++++++------------ src/tensors/vectorinterface.jl | 24 +- test/runtests.jl | 18 +- test/tensors.jl | 999 +++++++++++++++++---------------- 12 files changed, 1053 insertions(+), 1109 deletions(-) diff --git a/src/TensorKit.jl b/src/TensorKit.jl index 30f423d6..b43cf47a 100644 --- a/src/TensorKit.jl +++ b/src/TensorKit.jl @@ -175,34 +175,34 @@ include("fusiontrees/fusiontrees.jl") #------------------------------------------- include("spaces/vectorspaces.jl") -# # Definitions and methods for tensors -# #------------------------------------- -# # general definitions +# Definitions and methods for tensors +#------------------------------------- +# general definitions include("tensors/abstracttensor.jl") -include("tensors/tensortreeiterator.jl") +# include("tensors/tensortreeiterator.jl") include("tensors/tensor.jl") include("tensors/adjoint.jl") include("tensors/linalg.jl") include("tensors/vectorinterface.jl") -include("tensors/tensoroperations.jl") -include("tensors/indexmanipulations.jl") -include("tensors/truncation.jl") -include("tensors/factorizations.jl") -include("tensors/braidingtensor.jl") +# include("tensors/tensoroperations.jl") +# include("tensors/indexmanipulations.jl") +# include("tensors/truncation.jl") +# include("tensors/factorizations.jl") +# include("tensors/braidingtensor.jl") # # Planar macros and related functionality # #----------------------------------------- -@nospecialize -using Base.Meta: isexpr -include("planar/analyzers.jl") -include("planar/preprocessors.jl") -include("planar/postprocessors.jl") -include("planar/macros.jl") -@specialize -include("planar/planaroperations.jl") - -# deprecations: to be removed in version 1.0 or sooner -include("auxiliary/deprecate.jl") +# @nospecialize +# using Base.Meta: isexpr +# include("planar/analyzers.jl") +# include("planar/preprocessors.jl") +# include("planar/postprocessors.jl") +# include("planar/macros.jl") +# @specialize +# include("planar/planaroperations.jl") + +# # deprecations: to be removed in version 1.0 or sooner +# include("auxiliary/deprecate.jl") # Extensions # ---------- diff --git a/src/auxiliary/auxiliary.jl b/src/auxiliary/auxiliary.jl index 4dbec263..e4eb90fb 100644 --- a/src/auxiliary/auxiliary.jl +++ b/src/auxiliary/auxiliary.jl @@ -50,7 +50,7 @@ else using Base: @constprop end -const MatOrNumber{T<:Number} = Union{DenseMatrix{T},T} +const VecOrNumber{T<:Number} = Union{DenseVector{T},T} """ _interleave(a::NTuple{N}, b::NTuple{N}) -> NTuple{2N} diff --git a/src/auxiliary/linalg.jl b/src/auxiliary/linalg.jl index f0f9b1db..8ba1806d 100644 --- a/src/auxiliary/linalg.jl +++ b/src/auxiliary/linalg.jl @@ -66,21 +66,10 @@ using LinearAlgebra: BlasFloat, BlasReal, BlasComplex, checksquare using ..TensorKit: OrthogonalFactorizationAlgorithm, QL, QLpos, QR, QRpos, LQ, LQpos, RQ, RQpos, SVD, SDD, Polar -# only defined in >v1.7 -@static if VERSION < v"1.7-" - _rf_findmax((fm, im), (fx, ix)) = isless(fm, fx) ? 
(fx, ix) : (fm, im) - _argmax(f, domain) = mapfoldl(x -> (f(x), x), _rf_findmax, domain)[2] -else - _argmax(f, domain) = argmax(f, domain) -end - # TODO: define for CuMatrix if we support this -function one!(A::DenseMatrix) - Threads.@threads for j in 1:size(A, 2) - @simd for i in 1:size(A, 1) - @inbounds A[i, j] = i == j - end - end +function one!(A::StridedMatrix) + A[:] .= 0 + A[diagind(A)] .= 1 return A end @@ -291,12 +280,12 @@ function eig!(A::StridedMatrix{T}; permute::Bool=true, scale::Bool=true) where { while j <= n if DI[j] == 0 vr = view(VR, :, j) - s = conj(sign(_argmax(abs, vr))) + s = conj(sign(argmax(abs, vr))) V[:, j] .= s .* vr else vr = view(VR, :, j) vi = view(VR, :, j + 1) - s = conj(sign(_argmax(abs, vr))) # vectors coming from lapack have already real absmax component + s = conj(sign(argmax(abs, vr))) # vectors coming from lapack have already real absmax component V[:, j] .= s .* (vr .+ im .* vi) V[:, j + 1] .= s .* (vr .- im .* vi) j += 1 @@ -314,7 +303,7 @@ function eig!(A::StridedMatrix{T}; permute::Bool=true, A)[[2, 4]] for j in 1:n v = view(V, :, j) - s = conj(sign(_argmax(abs, v))) + s = conj(sign(argmax(abs, v))) v .*= s end return D, V @@ -326,7 +315,7 @@ function eigh!(A::StridedMatrix{T}) where {T<:BlasFloat} D, V = LAPACK.syevr!('V', 'A', 'U', A, 0.0, 0.0, 0, 0, -1.0) for j in 1:n v = view(V, :, j) - s = conj(sign(_argmax(abs, v))) + s = conj(sign(argmax(abs, v))) v .*= s end return D, V diff --git a/src/spaces/homspace.jl b/src/spaces/homspace.jl index c1c4e6af..5b5f1318 100644 --- a/src/spaces/homspace.jl +++ b/src/spaces/homspace.jl @@ -87,9 +87,9 @@ function blocksectors(W::HomSpace) if N₁ == 0 || N₂ == 0 return (one(I),) elseif N₂ <= N₁ - return filter!(c -> hasblock(codom, c), collect(blocksectors(dom))) + return sort!(filter!(c -> hasblock(codom, c), collect(blocksectors(dom)))) else - return filter!(c -> hasblock(dom, c), collect(blocksectors(codom))) + return sort!(filter!(c -> hasblock(dom, c), collect(blocksectors(codom)))) end end @@ -134,3 +134,107 @@ function compose(W::HomSpace{S}, V::HomSpace{S}) where {S} domain(W) == codomain(V) || throw(SpaceMismatch("$(domain(W)) ≠ $(codomain(V))")) return HomSpace(codomain(W), domain(V)) end + +# Block and fusion tree ranges: structure information for building tensors +#-------------------------------------------------------------------------- +struct TensorStructure{I,F₁,F₂} + totaldim::Int + blockstructure::SectorDict{I,Tuple{Tuple{Int,Int},UnitRange{Int}}} + fusiontreelist::Vector{Tuple{F₁,F₂}} + fusiontreeranges::Vector{Tuple{UnitRange{Int},UnitRange{Int}}} + fusiontreeindices::FusionTreeDict{Tuple{F₁,F₂},Int} +end + +abstract type CacheStyle end +struct NoCache <: CacheStyle end +struct TaskLocalCache{D<:AbstractDict} <: CacheStyle end +struct GlobalCache <: CacheStyle end + +function CacheStyle(I::Type{<:Sector}) + return GlobalCache() + # if FusionStyle(I) === UniqueFusion() + # return TaskLocalCache{SectorDict{I,Any}}() + # else + # return GlobalCache() + # end +end + +tensorstructure(W::HomSpace) = tensorstructure(W, CacheStyle(sectortype(W))) + +function tensorstructure(W::HomSpace, ::NoCache) + codom = codomain(W) + dom = domain(W) + N₁ = length(codom) + N₂ = length(dom) + I = sectortype(W) + F₁ = fusiontreetype(I, N₁) + F₂ = fusiontreetype(I, N₂) + + blockstructure = SectorDict{I,Tuple{Tuple{Int,Int},UnitRange{Int}}}() + fusiontreelist = Vector{Tuple{F₁,F₂}}() + fusiontreeranges = Vector{Tuple{UnitRange{Int},UnitRange{Int}}}() + outer_offset = 0 + for c in blocksectors(W) + inner_offset₂ = 0 
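+        # `inner_offset₁` and `inner_offset₂` track the row and column position of each subblock
+        # within the matrix block of coupled sector `c`: rows are labeled by splitting trees `f₁`
+        # of the codomain, columns by fusion trees `f₂` of the domain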
+ inner_offset₁ = 0 + for f₂ in fusiontrees(dom, c) + s₂ = f₂.uncoupled + d₂ = dim(dom, s₂) + r₂ = (inner_offset₂ + 1):(inner_offset₂ + d₂) + inner_offset₂ = last(r₂) + # TODO: # now we run the code below for every f₂; should we do this separately + inner_offset₁ = 0 # reset here to avoid multiple counting + for f₁ in fusiontrees(codom, c) + s₁ = f₁.uncoupled + d₁ = dim(codom, s₁) + r₁ = (inner_offset₁ + 1):(inner_offset₁ + d₁) + inner_offset₁ = last(r₁) + push!(fusiontreelist, (f₁, f₂)) + push!(fusiontreeranges, (r₁, r₂)) + end + end + blocksize = (inner_offset₁, inner_offset₂) + blocklength = blocksize[1] * blocksize[2] + blockrange = (outer_offset + 1):(outer_offset + blocklength) + outer_offset = last(blockrange) + blockstructure[c] = (blocksize, blockrange) + end + fusiontreeindices = sizehint!(FusionTreeDict{Tuple{F₁,F₂},Int}(), length(fusiontreelist)) + for i = 1:length(fusiontreelist) + fusiontreeindices[fusiontreelist[i]] = i + end + totaldim = outer_offset + structure = TensorStructure(totaldim, blockstructure, fusiontreelist, fusiontreeranges, fusiontreeindices) + return structure +end + +function tensorstructure(W::HomSpace, ::TaskLocalCache{D}) where {D} + cache::D = get!(task_local_storage(), :_local_tensorstructure_cache) do + return D() + end + N₁ = length(codomain(W)) + N₂ = length(domain(W)) + I = sectortype(W) + F₁ = fusiontreetype(I, N₁) + F₂ = fusiontreetype(I, N₂) + structure::TensorStructure{I,F₁,F₂} = get!(cache, W) do + tensorstructure(W, NoCache()) + end + return structure +end + +const GLOBAL_TENSORSTRUCTURE_CACHE = LRU{Any,Any}(; maxsize=10^4) +# 10^4 different tensor spaces should be enough for most purposes +function tensorstructure(W::HomSpace, ::GlobalCache) + cache = GLOBAL_TENSORSTRUCTURE_CACHE + N₁ = length(codomain(W)) + N₂ = length(domain(W)) + I = sectortype(W) + F₁ = fusiontreetype(I, N₁) + F₂ = fusiontreetype(I, N₂) + structure::TensorStructure{I,F₁,F₂} = get!(cache, W) do + return tensorstructure(W, NoCache()) + end + return structure +end + diff --git a/src/spaces/productspace.jl b/src/spaces/productspace.jl index 36e57693..51adba91 100644 --- a/src/spaces/productspace.jl +++ b/src/spaces/productspace.jl @@ -134,20 +134,6 @@ function Base.axes(P::ProductSpace{<:ElementarySpace,N}, return map(axes, P.spaces, sectors) end -""" - fusiontrees(P::ProductSpace, blocksector::Sector) - -Return an iterator over all fusion trees that can be formed by fusing the sectors present -in the different spaces that make up the `ProductSpace` instance, resulting in the coupled -sector `blocksector`. -""" -function fusiontrees(P::ProductSpace{S,N}, blocksector::I) where {S,N,I} - I == sectortype(S) || throw(SectorMismatch()) - uncoupled = map(sectors, P.spaces) - isdualflags = map(isdual, P.spaces) - return FusionTreeIterator(uncoupled, blocksector, isdualflags) -end - """ blocksectors(P::ProductSpace) @@ -179,6 +165,20 @@ function blocksectors(P::ProductSpace{S,N}) where {S,N} return bs end +""" + fusiontrees(P::ProductSpace, blocksector::Sector) + +Return an iterator over all fusion trees that can be formed by fusing the sectors present +in the different spaces that make up the `ProductSpace` instance into the coupled sector +`blocksector`. 
+""" +function fusiontrees(P::ProductSpace{S,N}, blocksector::I) where {S,N,I} + I == sectortype(S) || throw(SectorMismatch()) + uncoupled = map(sectors, P.spaces) + isdualflags = map(isdual, P.spaces) + return FusionTreeIterator(uncoupled, blocksector, isdualflags) +end + """ hasblock(P::ProductSpace, c::Sector) diff --git a/src/tensors/abstracttensor.jl b/src/tensors/abstracttensor.jl index 5d7c0a20..5a906a03 100644 --- a/src/tensors/abstracttensor.jl +++ b/src/tensors/abstracttensor.jl @@ -38,7 +38,7 @@ spacetype(::Type{<:AbstractTensorMap{<:Any,S}}) where {S} = S Return the type of sector `I` of a tensor. """ -sectortype(::Type{TT}) where {TT<:AbstractTensorMap} = sectortype(spacetype(TT)) +sectortype(::Type{<:AbstractTensorMap{<:Any,S}}) where {S} = sectortype(S) function InnerProductStyle(::Type{TT}) where {TT<:AbstractTensorMap} return InnerProductStyle(spacetype(TT)) @@ -106,6 +106,7 @@ specified, return the `i`-th output space. Implementations should provide `codom See also [`domain`](@ref) and [`space`](@ref). """ codomain +codomain(t::AbstractTensorMap) = codomain(space(t)) codomain(t::AbstractTensorMap, i) = codomain(t)[i] target(t::AbstractTensorMap) = codomain(t) # categorical terminology @@ -119,6 +120,7 @@ specified, return the `i`-th input space. Implementations should provide `domain See also [`codomain`](@ref) and [`space`](@ref). """ domain +domain(t::AbstractTensorMap) = domain(space(t)) domain(t::AbstractTensorMap, i) = domain(t)[i] source(t::AbstractTensorMap) = domain(t) # categorical terminology @@ -128,16 +130,23 @@ source(t::AbstractTensorMap) = domain(t) # categorical terminology The index information of a tensor, i.e. the `HomSpace` of its domain and codomain. If `i` is specified, return the `i`-th index space. """ -space(t::AbstractTensorMap) = HomSpace(codomain(t), domain(t)) space(t::AbstractTensorMap, i::Int) = space(t)[i] +""" + tensorstructure(t::AbstractTensorMap) -> TensorStructure + +Return the necessary structure information to decompose a tensor in blocks labeled by +coupled sectors and in subblocks labeled by a splitting-fusion tree couple. +""" +tensorstructure(t::AbstractTensorMap) = tensorstructure(space(t)) + """ dim(t::AbstractTensorMap) -> Int The total number of free parameters of a tensor, discounting the entries that are fixed by symmetry. This is also the dimension of the `HomSpace` on which the `TensorMap` is defined. """ -dim(t::AbstractTensorMap) = dim(space(t)) +dim(t::AbstractTensorMap) = tensorstructure(t).totaldim """ codomainind(::Union{TT,Type{TT}}) where {TT<:AbstractTensorMap} -> Tuple{Int} @@ -184,7 +193,7 @@ function adjointtensorindices(t::AbstractTensorMap, indices::IndexTuple) end function adjointtensorindices(t::AbstractTensorMap, p::Index2Tuple) - return adjointtensorindices(t, p[1]), adjointtensorindices(t, p[2]) + return (adjointtensorindices(t, p[1]), adjointtensorindices(t, p[2])) end @doc """ @@ -194,7 +203,9 @@ Return an iterator over all blocks of a tensor, i.e. all coupled sectors and the corresponding blocks. See also [`block`](@ref), [`blocksectors`](@ref), [`blockdim`](@ref) and [`hasblock`](@ref). 
-""" blocks +""" +blocks(t::AbstractTensorMap) = SectorDict(c => block(t, c) for c in blocksectors(t)) # TODO: make iterator + @doc """ block(t::AbstractTensorMap, c::Sector) -> DenseMatrix @@ -205,29 +216,90 @@ See also [`blocks`](@ref), [`blocksectors`](@ref), [`blockdim`](@ref) and [`hasb """ block """ - hasblock(t::AbstractTensorMap, c::Sector) -> Bool + blocksectors(t::AbstractTensorMap) -Verify whether a tensor has a block corresponding to a coupled sector `c`. +Return an iterator over all coupled sectors of a tensor. """ -hasblock +blocksectors(t::AbstractTensorMap) = keys(tensorstructure(t).blockstructure) -@doc """ - blocksectors(t::AbstractTensorMap) +""" + hasblock(t::AbstractTensorMap, c::Sector) -> Bool -Return an iterator over all coupled sectors of a tensor. -""" blocksectors(::AbstractTensorMap) +Verify whether a tensor has a block corresponding to a coupled sector `c`. +""" +hasblock(t::AbstractTensorMap, c::Sector) = c ∈ blocksectors(t) -@doc """ - blockdim(t::AbstractTensorMap, c::Sector) -> Base.Dims +# @doc """ +# blockdim(t::AbstractTensorMap, c::Sector) -> Base.Dims -Return the dimensions of the block of a tensor corresponding to a coupled sector `c`. -""" blockdim(::AbstractTensorMap, ::Sector) +# Return the dimensions of the block of a tensor corresponding to a coupled sector `c`. +# """ blockdim(::AbstractTensorMap, ::Sector) -@doc """ +""" fusiontrees(t::AbstractTensorMap) Return an iterator over all splitting - fusion tree pairs of a tensor. -""" fusiontrees(::AbstractTensorMap) +""" +fusiontrees(t::AbstractTensorMap) = tensorstructure(t).fusiontreelist + +# auxiliary function +@inline function trivial_fusiontree(t::AbstractTensorMap) + sectortype(t) === Trivial || + throw(SectorMismatch("Only valid for tensors with trivial symmetry")) + spaces1 = codomain(t).spaces + spaces2 = domain(t).spaces + f₁ = FusionTree{Trivial}(map(x -> Trivial(), spaces1), Trivial(), map(isdual, spaces1)) + f₂ = FusionTree{Trivial}(map(x -> Trivial(), spaces2), Trivial(), map(isdual, spaces2)) + return (f₁, f₂) +end + +# Derived indexing behavior for tensors with trivial symmetry +#------------------------------------------------------------- +using TensorKit.Strided: SliceIndex + +# For a tensor with trivial symmetry, allow direct indexing +# TODO: should we allow range indices as well +# TODO 2: should we enable this for (abelian) symmetric tensors with some CUDA like `allowscalar` flag? +# TODO 3: should we then also allow at least `getindex` for nonabelian tensors +""" + Base.getindex(t::AbstractTensorMap, indices::Vararg{Int}) + t[indices] + +Return a view into the data slice of `t` corresponding to `indices`, by slicing the +`StridedViews.StridedView` into the full data array. +""" +@inline function Base.getindex(t::AbstractTensorMap, indices::Vararg{SliceIndex}) + data = t[trivial_fusiontree(t)...] + @boundscheck checkbounds(data, indices...) + @inbounds v = data[indices...] + return v +end +""" + Base.setindex!(t::AbstractTensorMap, v, indices::Vararg{Int}) + t[indices] = v + +Assigns `v` to the data slice of `t` corresponding to `indices`. +""" +@inline function Base.setindex!(t::AbstractTensorMap, v, indices::Vararg{SliceIndex}) + data = t[trivial_fusiontree(t)...] + @boundscheck checkbounds(data, indices...) + @inbounds data[indices...] 
= v + return v +end + +# TODO : probably deprecate the following +# For a tensor with trivial symmetry, allow no argument indexing +""" + Base.getindex(t::AbstractTensorMap) + t[] + +Return a view into the data of `t` as a `StridedViews.StridedView` of size +`(dims(codomain(t))..., dims(domain(t))...)`. +""" +@inline function Base.getindex(t::AbstractTensorMap) + return t[trivial_fusiontree(t)...] +end +@inline Base.setindex!(t::AbstractTensorMap, v) = copy!(getindex(t), v) # Similar #--------- @@ -270,12 +342,21 @@ Base.similar(t::AbstractTensorMap, ::Type{T}) where {T} = similar(t, T, space(t) Base.similar(t::AbstractTensorMap) = similar(t, storagetype(t), space(t)) # generic implementation for AbstractTensorMap -> returns `TensorMap` -function Base.similar(::AbstractTensorMap, ::Type{TorA}, - P::TensorMapSpace{S}) where {TorA<:MatOrNumber,S} +function Base.similar(t::AbstractTensorMap, ::Type{TorA}, + P::TensorMapSpace{S}) where {TorA,S} + + if TorA <: Number + T = TorA + A = similarstoragetype(t, T) + elseif TorA <: DenseVector + A = TorA + T = scalartype(A) + else + throw(ArgumentError("Type $TorA not supported for similar")) + end N₁ = length(codomain(P)) N₂ = length(domain(P)) - TT = tensormaptype(S, N₁, N₂, TorA) - return TT(undef, codomain(P), domain(P)) + return TensorMap{T,S,N₁,N₂,A}(undef, P) end # implementation in type-domain diff --git a/src/tensors/adjoint.jl b/src/tensors/adjoint.jl index aa6628e4..f758c53c 100644 --- a/src/tensors/adjoint.jl +++ b/src/tensors/adjoint.jl @@ -6,100 +6,38 @@ Specific subtype of [`AbstractTensorMap`](@ref) that is a lazy wrapper for representing the adjoint of an instance of [`AbstractTensorMap`](@ref). """ -struct AdjointTensorMap{T,S,N₁,N₂,TT<:AbstractTensorMap{T,S,N₂,N₁}} <: +struct AdjointTensorMap{T,S,N₁,N₂,TT<:AbstractTensorMap{T,S,N₁,N₂}} <: AbstractTensorMap{T,S,N₁,N₂} parent::TT end -#! format: off -const AdjointTrivialTensorMap{T,S,N₁,N₂,TT<:TrivialTensorMap{T,S,N₂,N₁}} = - AdjointTensorMap{T,S,N₁,N₂,TT} -#! 
format: on - # Constructor: construct from taking adjoint of a tensor +Base.adjoint(t::AdjointTensorMap) = t.parent Base.adjoint(t::AbstractTensorMap) = AdjointTensorMap(t) -Base.adjoint(t::AdjointTensorMap) = parent(t) - -Base.parent(t::AdjointTensorMap) = t.parent -parenttype(::Type{<:AdjointTensorMap{T,S,N₁,N₂,TT}}) where {T,S,N₁,N₂,TT} = TT - -function Base.similar(t::AdjointTensorMap, ::Type{TorA}, - P::TensorMapSpace) where {TorA<:MatOrNumber} - return similar(t', TorA, P) -end # Properties -codomain(t::AdjointTensorMap) = domain(parent(t)) -domain(t::AdjointTensorMap) = codomain(parent(t)) - -blocksectors(t::AdjointTensorMap) = blocksectors(parent(t)) +space(t::AdjointTensorMap) = adjoint(space(t')) +dim(t::AdjointTensorMap) = dim(t') +storagetype(::Type{AdjointTensorMap{T,S,N₁,N₂,TT}}) where {T,S,N₁,N₂,TT} = storagetype(TT) -storagetype(::Type{TT}) where {TT<:AdjointTensorMap} = storagetype(parenttype(TT)) +# Blocks and subblocks +#---------------------- +blocksectors(t::AdjointTensorMap) = blocksectors(t') +block(t::AdjointTensorMap, s::Sector) = block(t', s)' -dim(t::AdjointTensorMap) = dim(parent(t)) - -# Indexing -#---------- -hasblock(t::AdjointTensorMap, s::Sector) = hasblock(parent(t), s) -block(t::AdjointTensorMap, s::Sector) = block(parent(t), s)' -blocks(t::AdjointTensorMap) = (c => b' for (c, b) in blocks(parent(t))) - -fusiontrees(::AdjointTrivialTensorMap) = ((nothing, nothing),) -function fusiontrees(t::AdjointTensorMap{T,S,N₁,N₂,TT}) where {T,S,N₁,N₂,TT<:TensorMap} - return TensorKeyIterator(parent(t).colr, parent(t).rowr) -end - -function Base.getindex(t::AdjointTensorMap{T,S,N₁,N₂,<:TensorMap{T,S,N₁,N₂,I}}, +function Base.getindex(t::AdjointTensorMap{T,S,N₁,N₂}, f₁::FusionTree{I,N₁}, f₂::FusionTree{I,N₂}) where {T,S,N₁,N₂,I} - c = f₁.coupled - @boundscheck begin - c == f₂.coupled || throw(SectorMismatch()) - hassector(codomain(t), f₁.uncoupled) && hassector(domain(t), f₂.uncoupled) - end - return sreshape((StridedView(parent(t).data[c])[parent(t).rowr[c][f₂], - parent(t).colr[c][f₁]])', - (dims(codomain(t), f₁.uncoupled)..., dims(domain(t), f₂.uncoupled)...)) -end -@propagate_inbounds function Base.getindex(t::AdjointTensorMap{T,S,N₁,N₂}, - f₁::FusionTree{I,N₁}, - f₂::FusionTree{I,N₂}) where {T,S,N₁,N₂,I} - d_cod = dims(codomain(t), f₁.uncoupled) - d_dom = dims(domain(t), f₂.uncoupled) - return sreshape(sreshape(StridedView(parent(t)[f₂, f₁]), (prod(d_dom), prod(d_cod)))', - (d_cod..., d_dom...)) -end -@propagate_inbounds function Base.setindex!(t::AdjointTensorMap{T,S,N₁,N₂,I}, v, + parent = t' + subblock = getindex(parent, f₂, f₁) + return permutedims(conj(subblock), (domainind(parent)..., codomainind(parent)...)) +end +function Base.setindex!(t::AdjointTensorMap{T,S,N₁,N₂}, v, f₁::FusionTree{I,N₁}, f₂::FusionTree{I,N₂}) where {T,S,N₁,N₂,I} return copy!(getindex(t, f₁, f₂), v) end -@inline function Base.getindex(t::AdjointTrivialTensorMap) - return sreshape(StridedView(parent(t).data)', - (dims(codomain(t))..., dims(domain(t))...)) -end -@inline Base.setindex!(t::AdjointTrivialTensorMap, v) = copy!(getindex(t), v) - -@inline Base.getindex(t::AdjointTrivialTensorMap, ::Tuple{Nothing,Nothing}) = getindex(t) -@inline function Base.setindex!(t::AdjointTrivialTensorMap, v, ::Tuple{Nothing,Nothing}) - return setindex!(t, v) -end - -# For a tensor with trivial symmetry, allow direct indexing -@inline function Base.getindex(t::AdjointTrivialTensorMap, indices::Vararg{Int}) - data = t[] - @boundscheck checkbounds(data, indices) - @inbounds v = data[indices...] 
- return v -end -@inline function Base.setindex!(t::AdjointTrivialTensorMap, v, indices::Vararg{Int}) - data = t[] - @boundscheck checkbounds(data, indices) - @inbounds data[indices...] = v - return v -end - # Show #------ function Base.summary(io::IO, t::AdjointTensorMap) diff --git a/src/tensors/linalg.jl b/src/tensors/linalg.jl index fafa4726..7eec77a4 100644 --- a/src/tensors/linalg.jl +++ b/src/tensors/linalg.jl @@ -57,13 +57,20 @@ end Construct the identity endomorphism on space `V`, i.e. return a `t::TensorMap` with `domain(t) == codomain(t) == V`, where either `scalartype(t) = T` if `T` is a `Number` type -or `storagetype(t) = T` if `T` is a `DenseMatrix` type. +or `storagetype(t) = T` if `T` is a `DenseVector` type. """ id(V::TensorSpace) = id(Float64, V) -function id(::Type{A}, V::TensorSpace{S}) where {A<:MatOrNumber,S} +function id(::Type{A}, V::TensorSpace{S}) where {A,S} W = V ← V - N = length(domain(W)) - t = tensormaptype(S, N, N, A)(undef, codomain(W), domain(W)) + if A <: Number + t = TensorMap{A}(undef, W) + elseif A <: DenseVector + T = scalartype(A) + N = length(codomain(W)) + t = TensorMap{T,S,N,N,A}(undef, W) + else + throw(ArgumentError("`id` only supports Number or DenseVector subtypes as first argument")) + end return one!(t) end @@ -74,7 +81,7 @@ end Construct a specific isomorphism between the codomain and the domain, i.e. return a `t::TensorMap` where either `scalartype(t) = T` if `T` is a `Number` type or -`storagetype(t) = T` if `T` is a `DenseMatrix` type. If the spaces are not isomorphic, an +`storagetype(t) = T` if `T` is a `DenseVector` type. If the spaces are not isomorphic, an error will be thrown. !!! note @@ -83,10 +90,17 @@ error will be thrown. See also [`unitary`](@ref) when `InnerProductStyle(cod) === EuclideanProduct()`. """ -function isomorphism(::Type{A}, V::TensorMapSpace{S,N₁,N₂}) where {A<:MatOrNumber,S,N₁,N₂} +function isomorphism(::Type{A}, V::TensorMapSpace{S,N₁,N₂}) where {A<:VecOrNumber,S,N₁,N₂} codomain(V) ≅ domain(V) || throw(SpaceMismatch("codomain and domain are not isomorphic: $V")) - t = tensormaptype(S, N₁, N₂, A)(undef, codomain(V), domain(V)) + if A <: Number + t = TensorMap{A}(undef, V) + elseif A <: DenseVector + T = scalartype(A) + t = TensorMap{T,S,N₁,N₂,A}(undef, V) + else + throw(ArgumentError("`isomorphism` only supports Number or DenseVector subtypes as first argument")) + end for (_, b) in blocks(t) MatrixAlgebra.one!(b) end @@ -100,7 +114,7 @@ end Construct a specific unitary morphism between the codomain and the domain, i.e. return a `t::TensorMap` where either `scalartype(t) = T` if `T` is a `Number` type or -`storagetype(t) = T` if `T` is a `DenseMatrix` type. If the spaces are not isomorphic, or +`storagetype(t) = T` if `T` is a `DenseVector` type. If the spaces are not isomorphic, or the spacetype does not have a Euclidean inner product, an error will be thrown. !!! note @@ -109,7 +123,7 @@ the spacetype does not have a Euclidean inner product, an error will be thrown. See also [`isomorphism`](@ref) and [`isometry`](@ref). """ -function unitary(::Type{A}, V::TensorMapSpace{S,N₁,N₂}) where {A<:MatOrNumber,S,N₁,N₂} +function unitary(::Type{A}, V::TensorMapSpace{S,N₁,N₂}) where {A<:VecOrNumber,S,N₁,N₂} InnerProductStyle(S) === EuclideanProduct() || throw_invalid_innerproduct(:unitary) return isomorphism(A, V) end @@ -121,17 +135,24 @@ end Construct a specific isometry between the codomain and the domain, i.e. 
return a `t::TensorMap` where either `scalartype(t) = T` if `T` is a `Number` type or -`storagetype(t) = T` if `T` is a `DenseMatrix` type. The isometry `t` then satisfies +`storagetype(t) = T` if `T` is a `DenseVector` type. The isometry `t` then satisfies `t' * t = id(domain)` and `(t * t')^2 = t * t'`. If the spaces do not allow for such an isometric inclusion, an error will be thrown. See also [`isomorphism`](@ref) and [`unitary`](@ref). """ -function isometry(::Type{A}, V::TensorMapSpace{S,N₁,N₂}) where {A<:MatOrNumber,S,N₁,N₂} +function isometry(::Type{A}, V::TensorMapSpace{S,N₁,N₂}) where {A<:VecOrNumber,S,N₁,N₂} InnerProductStyle(S) === EuclideanProduct() || throw_invalid_innerproduct(:isometry) domain(V) ≾ codomain(V) || throw(SpaceMismatch("$V does not allow for an isometric inclusion")) - t = tensormaptype(S, N₁, N₂, A)(undef, codomain(V), domain(V)) + if A <: Number + t = TensorMap{A}(undef, V) + elseif A <: DenseVector + T = scalartype(A) + t = TensorMap{T,S,N₁,N₂,A}(undef, V) + else + throw(ArgumentError("`isometry` only supports Number or DenseVector subtypes as first argument")) + end for (_, b) in blocks(t) MatrixAlgebra.one!(b) end @@ -144,7 +165,7 @@ for morphism in (:isomorphism, :unitary, :isometry) $morphism(V::TensorMapSpace) = $morphism(Float64, V) $morphism(codomain::TensorSpace, domain::TensorSpace) = $morphism(codomain ← domain) function $morphism(::Type{T}, codomain::TensorSpace, - domain::TensorSpace) where {T<:MatOrNumber} + domain::TensorSpace) where {T<:VecOrNumber} return $morphism(T, codomain ← domain) end $morphism(t::AbstractTensorMap) = $morphism(storagetype(t), space(t)) @@ -172,15 +193,11 @@ LinearAlgebra.isdiag(t::AbstractTensorMap) = all(LinearAlgebra.isdiag, values(bl # Copy, adjoint! and fill: function Base.copy!(tdst::AbstractTensorMap, tsrc::AbstractTensorMap) space(tdst) == space(tsrc) || throw(SpaceMismatch("$(space(tdst)) ≠ $(space(tsrc))")) - for c in blocksectors(tdst) - copy!(StridedView(block(tdst, c)), StridedView(block(tsrc, c))) - end + copy!(tdst.data, tsrc.data) return tdst end function Base.fill!(t::AbstractTensorMap, value::Number) - for (c, b) in blocks(t) - fill!(b, value) - end + fill!(t.data, value) return t end function LinearAlgebra.adjoint!(tdst::AbstractTensorMap, @@ -269,32 +286,27 @@ function LinearAlgebra.mul!(tC::AbstractTensorMap, A = block(tA, c) B = block(tB, c) C = block(tC, c) - mul!(StridedView(C), StridedView(A), StridedView(B), α, β) + mul!(C, A, B, α, β) elseif β != one(β) rmul!(block(tC, c), β) end end return tC end -# TODO: reconsider wrapping the blocks in a StridedView, consider spawning threads for different blocks +# TODO: consider spawning threads for different blocks, support backends # TensorMap inverse function Base.inv(t::AbstractTensorMap) cod = codomain(t) dom = domain(t) - for c in union(blocksectors(cod), blocksectors(dom)) - blockdim(cod, c) == blockdim(dom, c) || - throw(SpaceMismatch("codomain $cod and domain $dom are not isomorphic: no inverse")) - end - if sectortype(t) === Trivial - return TensorMap(inv(block(t, Trivial())), domain(t) ← codomain(t)) - else - data = empty(t.data) - for (c, b) in blocks(t) - data[c] = inv(b) - end - return TensorMap(data, domain(t) ← codomain(t)) + cod ≅ dom || + throw(SpaceMismatch("codomain $cod and domain $dom are not isomorphic: no inverse")) + tinv = TensorMap{scalartype(t)}(undef, dom, cod) + for (c, b) in blocks(t) + binv = MatrixAlgebra.one!(block(tinv, c)) + ldiv!(lu(b), binv) end + return tinv end function LinearAlgebra.pinv(t::AbstractTensorMap; 
kwargs...) if sectortype(t) === Trivial diff --git a/src/tensors/tensor.jl b/src/tensors/tensor.jl index c9acc3dd..b97c6c94 100644 --- a/src/tensors/tensor.jl +++ b/src/tensors/tensor.jl @@ -3,142 +3,105 @@ #==========================================================# #! format: off """ - struct TensorMap{T, S<:IndexSpace, N₁, N₂, ...} <: AbstractTensorMap{T, S, N₁, N₂} + struct TensorMap{T, S<:IndexSpace, N₁, N₂, A<:DenseVector{T}} <: AbstractTensorMap{T, S, N₁, N₂} Specific subtype of [`AbstractTensorMap`](@ref) for representing tensor maps (morphisms in -a tensor category) whose data is stored in blocks of some subtype of `DenseMatrix`. +a tensor category), where the data is stored in a dense vector. """ -struct TensorMap{T, S<:IndexSpace, N₁, N₂, I<:Sector, A<:Union{<:DenseMatrix{T},SectorDict{I,<:DenseMatrix{T}}}, - F₁, F₂} <: AbstractTensorMap{T, S, N₁, N₂} +struct TensorMap{T, S<:IndexSpace, N₁, N₂, A<:DenseVector{T}} <: AbstractTensorMap{T, S, N₁, N₂} data::A - codom::ProductSpace{S,N₁} - dom::ProductSpace{S,N₂} - rowr::SectorDict{I,FusionTreeDict{F₁,UnitRange{Int}}} - colr::SectorDict{I,FusionTreeDict{F₂,UnitRange{Int}}} + space::TensorMapSpace{S,N₁,N₂} # uninitialized constructors - function TensorMap{T,S,N₁,N₂,Trivial,A,Nothing,Nothing}(::UndefInitializer, codom::ProductSpace{S,N₁}, dom::ProductSpace{S,N₂}) where {T,S<:IndexSpace,N₁,N₂,A<:DenseMatrix{T}} - data = A(undef, dim(codom), dim(dom)) - return TensorMap{T,S,N₁,N₂,Trivial,A,Nothing,Nothing}(data, codom, dom) - end - function TensorMap{T,S,N₁,N₂,I,A,F₁,F₂}(::UndefInitializer, codom::TensorSpace{S}, - dom::TensorSpace{S}) where {T,S<:IndexSpace,N₁,N₂, - I<:Sector,A<:SectorDict{I,<:DenseMatrix{T}},F₁,F₂} - I === sectortype(S) || throw(SectorMismatch()) - blocksectoriterator = blocksectors(codom ← dom) - rowr, rowdims = _buildblockstructure(codom, blocksectoriterator) - colr, coldims = _buildblockstructure(dom, blocksectoriterator) - data = SectorDict(c => valtype(A)(undef, rowdims[c], coldims[c]) for c in blocksectoriterator) - return TensorMap{T,S,N₁,N₂,I,A,F₁,F₂}(data, codom, dom, rowr, colr) + function TensorMap{T,S,N₁,N₂,A}(::UndefInitializer, space::TensorMapSpace{S,N₁,N₂}) where {T,S<:IndexSpace,N₁,N₂,A<:DenseVector{T}} + d = tensorstructure(space).totaldim + data = A(undef, d) + return TensorMap{T,S,N₁,N₂,A}(data, space) end # constructors from data - function TensorMap{T,S,N₁,N₂,I,A,F₁,F₂}(data::A, - codom::ProductSpace{S,N₁}, dom::ProductSpace{S,N₂}, - rowr::SectorDict{I,FusionTreeDict{F₁,UnitRange{Int}}}, - colr::SectorDict{I,FusionTreeDict{F₂,UnitRange{Int}}}) where - {T,S<:IndexSpace, N₁, N₂, I<:Sector, A<:SectorDict{I,<:DenseMatrix{T}}, - F₁<:FusionTree{I,N₁}, F₂<:FusionTree{I,N₂}} - T ⊆ field(S) || @warn("scalartype(data) = $T ⊈ $(field(S)))", maxlog = 1) - return new{T,S,N₁,N₂,I,A,F₁,F₂}(data, codom, dom, rowr, colr) - end - function TensorMap{T,S,N₁,N₂,Trivial,A,Nothing,Nothing}(data::A, - codom::ProductSpace{S,N₁}, - dom::ProductSpace{S,N₂}) where - {T,S<:IndexSpace,N₁,N₂,A<:DenseMatrix{T}} + function TensorMap{T,S,N₁,N₂,A}(data::A, space::TensorMapSpace{S,N₁,N₂}) where {T,S<:IndexSpace,N₁,N₂,A<:DenseVector{T}} T ⊆ field(S) || @warn("scalartype(data) = $T ⊈ $(field(S)))", maxlog = 1) - return new{T,S,N₁,N₂,Trivial,A,Nothing,Nothing}(data, codom, dom) + return new{T,S,N₁,N₂,A}(data, space) end end #! 
format: on """ - Tensor{T, S, N, I, A, F₁, F₂} = TensorMap{T, S, N, 0, I, A, F₁, F₂} + Tensor{T, S, N, A<:DenseVector{T}} = TensorMap{T, S, N, 0, A} Specific subtype of [`AbstractTensor`](@ref) for representing tensors whose data is stored -in blocks of some subtype of `DenseMatrix`. +in a dense vector. -A `Tensor{T, S, N, I, A, F₁, F₂}` is actually a special case `TensorMap{T, S, N, 0, I, A, F₁, F₂}`, +A `Tensor{T, S, N, A}` is actually a special case `TensorMap{T, S, N, 0, A}`, i.e. a tensor map with only a non-trivial output space. """ -const Tensor{T,S,N,I,A,F₁,F₂} = TensorMap{T,S,N,0,I,A,F₁,F₂} - -""" - TrivialTensorMap{T, S, N₁, N₂, A<:DenseMatrix} = TensorMap{T, S, N₁, N₂, Trivial, - A, Nothing, Nothing} - -A special case of [`TensorMap`](@ref) for representing tensor maps with trivial symmetry, -i.e., whose `sectortype` is `Trivial`. -""" -const TrivialTensorMap{T,S,N₁,N₂,A<:DenseMatrix} = TensorMap{T,S,N₁,N₂,Trivial,A,Nothing, - Nothing} - -""" - TrivialTensor{T, S, N, A} = TrivialTensorMap{T, S, N, 0, A} +const Tensor{T,S,N,A} = TensorMap{T,S,N,0,A} -A special case of [`Tensor`](@ref) for representing tensors with trivial symmetry, i.e., -whose `sectortype` is `Trivial`. -""" -const TrivialTensor{T,S,N,A} = TrivialTensorMap{T,S,N,0,A} - -# TODO: check if argument order should change -""" - tensormaptype(::Type{S}, N₁::Int, N₂::Int, [::Type{T}]) where {S<:IndexSpace,T} -> ::Type{<:TensorMap} - -Return the fully specified type of a tensor map with elementary space `S`, `N₁` output -spaces and `N₂` input spaces, either with scalar type `T` or with storage type `T`. -""" -function tensormaptype(::Type{S}, N₁::Int, N₂::Int, - ::Type{TorA}) where {S,TorA<:MatOrNumber} - I = sectortype(S) - if TorA <: DenseMatrix - M = TorA - T = scalartype(TorA) - elseif TorA <: Number - M = Matrix{TorA} - T = TorA - else - throw(ArgumentError("the final argument of `tensormaptype` should either be the scalar or the storage type, i.e. a subtype of `Number` or of `DenseMatrix`")) - end - if I === Trivial - return TensorMap{T,S,N₁,N₂,I,M,Nothing,Nothing} +# TODO: should we deprecate this? +# It seems quite heavily used in MPSKit.jl, although they are all of the form +# `tensormaptype(S, N₁, N₂, A)` and so could thus be replaced with `TensorMap{S, N₁, N₂, A}` +function tensormaptype(S::Type{<:IndexSpace}, N₁, N₂, TorA::Type) + if TorA <: Number + return TensorMap{TorA, S, N₁, N₂, Vector{TorA}} + elseif TorA <: DenseVector + return TensorMap{scalartype(TorA), S, N₁, N₂, TorA} else - F₁ = fusiontreetype(I, N₁) - F₂ = fusiontreetype(I, N₂) - return TensorMap{T,S,N₁,N₂,I,SectorDict{I,M},F₁,F₂} + throw(ArgumentError("invalid type for TensorMap data: $TorA")) end end -tensormaptype(S, N₁, N₂=0) = tensormaptype(S, N₁, N₂, Float64) # Basic methods for characterising a tensor: #-------------------------------------------- -codomain(t::TensorMap) = t.codom -domain(t::TensorMap) = t.dom - -blocksectors(t::TrivialTensorMap) = OneOrNoneIterator(dim(t) != 0, Trivial()) -blocksectors(t::TensorMap) = keys(t.data) +space(t::TensorMap) = t.space """ - storagetype(::Union{T,Type{T}}) where {T<:TensorMap} -> Type{A<:DenseMatrix} + storagetype(::Union{T,Type{T}}) where {T<:TensorMap} -> Type{A<:DenseVector} Return the type of the storage `A` of the tensor map. 
""" -function storagetype(::Type{<:TrivialTensorMap{T,S,N₁,N₂,A}}) where - {T,S,N₁,N₂,A} - return A -end -function storagetype(::Type{<:TensorMap{T,S,N₁,N₂,I,<:SectorDict{I,A}}}) where - {T,S,N₁,N₂,I<:Sector,A<:DenseMatrix} - return A -end +storagetype(::Type{<:TensorMap{T,S,N₁,N₂,A}}) where {T,S,N₁,N₂,A<:DenseVector{T}} = A -dim(t::TensorMap) = mapreduce(x -> length(x[2]), +, blocks(t); init=0) +dim(t::TensorMap) = length(t.data) # General TensorMap constructors #-------------------------------- +# undef constructors +""" + TensorMap{T}(undef, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}) + where {T,S,N₁,N₂} + TensorMap{T}(undef, codomain ← domain) + TensorMap{T}(undef, domain → codomain) + # expert mode: select storage type `A` + TensorMap{T,S,N₁,N₂,A}(undef, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}) + +Construct a `TensorMap` with uninitialized data. +""" +function TensorMap{T}(::UndefInitializer, V::TensorMapSpace{S,N₁,N₂}) where {T,S,N₁,N₂} + return TensorMap{T,S,N₁,N₂,Vector{T}}(undef, V) +end +function TensorMap{T}(::UndefInitializer, codomain::TensorSpace{S}, + domain::TensorSpace{S}) where {T,S} + return TensorMap{T}(undef, codomain ← domain) +end +function Tensor{T}(::UndefInitializer, V::TensorSpace{S}) where {T,S} + return TensorMap{T}(undef, V ← one(V)) +end + +# constructor starting from vector = independent data (N₁ + N₂ = 1 is special cased below) +# documentation is captured by the case where `data` is a general array +# here, we force the `T` argument to distinguish it from the more general constructor below +function TensorMap{T}(data::A, V::TensorMapSpace{S,N₁,N₂}) where {T,S,N₁,N₂,A<:DenseVector{T}} + return TensorMap{T,S,N₁,N₂,A}(data, V) +end +function TensorMap{T}(data::DenseVector{T}, codomain::TensorSpace{S}, + domain::TensorSpace{S}) where {T, S} + return TensorMap(data, codomain ← domain) +end + # constructor starting from block data """ - TensorMap(data::AbstractDict{<:Sector,<:DenseMatrix}, codomain::ProductSpace{S,N₁}, + TensorMap(data::AbstractDict{<:Sector,<:AbstractMatrix}, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}) where {S<:ElementarySpace,N₁,N₂} TensorMap(data, codomain ← domain) TensorMap(data, domain → codomain) @@ -146,9 +109,8 @@ dim(t::TensorMap) = mapreduce(x -> length(x[2]), +, blocks(t); init=0) Construct a `TensorMap` by explicitly specifying its block data. ## Arguments -- `data::AbstractDict{<:Sector,<:DenseMatrix}`: dictionary containing the block data for - each coupled sector `c` as a `DenseMatrix` of size - `(blockdim(codomain, c), blockdim(domain, c))`. +- `data::AbstractDict{<:Sector,<:AbstractMatrix}`: dictionary containing the block data for + each coupled sector `c` as a matrix of size `(blockdim(codomain, c), blockdim(domain, c))`. - `codomain::ProductSpace{S,N₁}`: the codomain as a `ProductSpace` of `N₁` spaces of type `S<:ElementarySpace`. - `domain::ProductSpace{S,N₂}`: the domain as a `ProductSpace` of `N₂` spaces of type @@ -157,87 +119,28 @@ Construct a `TensorMap` by explicitly specifying its block data. Alternatively, the domain and codomain can be specified by passing a [`HomSpace`](@ref) using the syntax `codomain ← domain` or `domain → codomain`. 
""" -function TensorMap(data::AbstractDict{<:Sector,<:DenseMatrix}, +function TensorMap(data::AbstractDict{<:Sector,<:AbstractMatrix}, V::TensorMapSpace{S,N₁,N₂}) where {S,N₁,N₂} - dom = domain(V) - codom = codomain(V) - I = sectortype(S) - I == keytype(data) || throw(SectorMismatch()) - if I == Trivial - if dim(dom) != 0 && dim(codom) != 0 - return TensorMap(data[Trivial()], codom, dom) - else - return TensorMap(valtype(data)(undef, dim(codom), dim(dom)), codom, dom) - end - end - blocksectoriterator = blocksectors(codom ← dom) - for c in blocksectoriterator + + T = eltype(valtype(data)) + t = TensorMap{T}(undef, V) + for (c, b) in blocks(t) haskey(data, c) || throw(SectorMismatch("no data for block sector $c")) + datac = data[c] + size(datac) == size(b) || throw(DimensionMismatch("wrong size of block for sector $c")) + copy!(b, datac) end - rowr, rowdims = _buildblockstructure(codom, blocksectoriterator) - colr, coldims = _buildblockstructure(dom, blocksectoriterator) for (c, b) in data - c in blocksectoriterator || isempty(b) || + c ∈ blocksectors(t) || isempty(b) || throw(SectorMismatch("data for block sector $c not expected")) - isempty(b) || size(b) == (rowdims[c], coldims[c]) || - throw(DimensionMismatch("wrong size of block for sector $c")) - end - data2 = if isreal(I) - SectorDict(c => data[c] for c in blocksectoriterator) - else - SectorDict(c => complex(data[c]) for c in blocksectoriterator) end - TT = tensormaptype(S, N₁, N₂, valtype(data)) - return TT(data2, codom, dom, rowr, colr) + return t end -function TensorMap(data::AbstractDict{<:Sector,<:DenseMatrix}, codom::TensorSpace{S}, +function TensorMap(data::AbstractDict{<:Sector,<:AbstractMatrix}, codom::TensorSpace{S}, dom::TensorSpace{S}) where {S} return TensorMap(data, codom ← dom) end -""" - TensorMap{T}(undef, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}) - where {T,S,N₁,N₂} - TensorMap{T}(undef, codomain ← domain) - TensorMap{T}(undef, domain → codomain) - # expert mode: select storage type `A` - TensorMap{T,S,N₁,N₂,I,A}(undef, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}) - -Construct a `TensorMap` with uninitialized data. 
-""" -function TensorMap{T}(::UndefInitializer, V::TensorMapSpace{S,N₁,N₂}) where {T,S,N₁,N₂} - TT = tensormaptype(S, N₁, N₂, T) # construct full type - return TT(undef, codomain(V), domain(V)) -end -function TensorMap{T}(::UndefInitializer, codomain::TensorSpace{S}, - domain::TensorSpace{S}) where {T,S} - return TensorMap{T}(undef, codomain ← domain) -end -function Tensor{T}(::UndefInitializer, V::TensorSpace{S}) where {T,S} - return TensorMap{T}(undef, V ← one(V)) -end - -# auxiliary function -function _buildblockstructure(P::ProductSpace{S,N}, blocksectors) where {S<:IndexSpace,N} - I = sectortype(S) - F = fusiontreetype(I, N) - treeranges = SectorDict{I,FusionTreeDict{F,UnitRange{Int}}}() - blockdims = SectorDict{I,Int}() - for c in blocksectors - offset = 0 - treerangesc = FusionTreeDict{F,UnitRange{Int}}() - for f in fusiontrees(P, c) - s = f.uncoupled - r = (offset + 1):(offset + dim(P, s)) - treerangesc[f] = r - offset = last(r) - end - treeranges[c] = treerangesc - blockdims[c] = offset - end - return treeranges, blockdims -end - @doc """ zeros([T=Float64,], codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}) where {S,N₁,N₂,T} zeros([T=Float64,], codomain ← domain) @@ -346,9 +249,9 @@ for randfun in (:rand, :randn, :randexp) end end -# constructor starting from a dense array +# constructor starting from an AbstractArray """ - TensorMap(data::DenseArray, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}; + TensorMap(data::AbstractArray, codomain::ProductSpace{S,N₁}, domain::ProductSpace{S,N₂}; tol=sqrt(eps(real(float(eltype(data)))))) where {S<:ElementarySpace,N₁,N₂} TensorMap(data, codomain ← domain; tol=sqrt(eps(real(float(eltype(data)))))) TensorMap(data, domain → codomain; tol=sqrt(eps(real(float(eltype(data)))))) @@ -363,50 +266,71 @@ Construct a `TensorMap` from a plain multidimensional array. `S<:ElementarySpace`. - `tol=sqrt(eps(real(float(eltype(data)))))::Float64`: -Here, `data` can be specified in two ways. It can either be a `DenseArray` of rank `N₁ + N₂` -whose size matches that of the domain and codomain spaces, -`size(data) == (dims(codomain)..., dims(domain)...)`, or a `DenseMatrix` where -`size(data) == (dim(codomain), dim(domain))`. The `TensorMap` constructor will then -reconstruct the tensor data such that the resulting tensor `t` satisfies -`data == convert(Array, t)`. For the case where `sectortype(S) == Trivial`, the `data` array -is simply reshaped into matrix form and referred to as such in the resulting `TensorMap` -instance. When `S<:GradedSpace`, the `data` array has to be compatible with how how each -sector in every space `V` is assigned to an index range within `1:dim(V)`. - -Alternatively, the domain and codomain can be specified by passing a [`HomSpace`](@ref) -using the syntax `codomain ← domain` or `domain → codomain`. +Here, `data` can be specified in three ways: +1) `data` can be a `DenseVector` of length `dim(codomain ← domain)`; in that case it represents + the actual independent entries of the tensor map. An instance will be created that directly + references `data`. +2) `data` can be an `AbstractMatrix` of size `(dim(codomain), dim(domain))` +3) `data` can be an `AbstractArray` of rank `N₁ + N₂` with a size matching that of the domain + and codomain spaces, i.e. `size(data) == (dims(codomain)..., dims(domain)...)` + +In case 2 and 3, the `TensorMap` constructor will reconstruct the tensor data such that the +resulting tensor `t` satisfies `data == convert(Array, t)`, up to an error specified by `tol`. 
+For the case where `sectortype(S) == Trivial` and `data isa DenseArray`, the `data` array is
+simply reshaped into a vector and used as in case 1 so that the memory will still be shared.
+In other cases, new memory will be allocated.
+
+Note that in the case of `N₁ + N₂ == 1`, case 3 also amounts to `data` being a vector, whereas
+when `N₁ + N₂ == 2`, case 2 and case 3 both require `data` to be a matrix. Such ambiguous cases
+are resolved by checking the size of `data` in an attempt to support all possible cases.
 
 !!! note
-    This constructor only works for `sectortype` values for which conversion to a plain
-    array is possible, and only in the case where the `data` actually respects the specified
-    symmetry structure.
+    This constructor for cases 2 and 3 only works for `sectortype` values for which conversion
+    to a plain array is possible, and only in the case where the `data` actually respects
+    the specified symmetry structure, up to a tolerance `tol`.
 """
-function TensorMap(data::DenseArray, V::TensorMapSpace{S,N₁,N₂};
+function TensorMap(data::AbstractArray, V::TensorMapSpace{S,N₁,N₂};
                    tol=sqrt(eps(real(float(eltype(data)))))) where {S<:IndexSpace,N₁,N₂}
+
+    T = eltype(data)
+    if ndims(data) == 1 && length(data) == dim(V)
+        if data isa DenseVector # refer to specific data-capturing constructor
+            return TensorMap{T}(data, V)
+        else
+            return TensorMap{T}(collect(data), V)
+        end
+    end
+
+    # dimension check
     codom = codomain(V)
     dom = domain(V)
-    (d1, d2) = (dim(codom), dim(dom))
-    if !(length(data) == d1 * d2 || size(data) == (d1, d2) ||
-         size(data) == (dims(codom)..., dims(dom)...))
+    arraysize = (dims(codom)..., dims(dom)...)
+    matsize = (dim(codom), dim(dom))
+
+    if !(size(data) == arraysize || size(data) == matsize)
        throw(DimensionMismatch())
     end
 
-    if sectortype(S) === Trivial
-        data2 = reshape(data, (d1, d2))
-        A = typeof(data2)
-        return tensormaptype(S, N₁, N₂, A)(data2, codom, dom)
+    if sectortype(S) === Trivial # refer to same method, but now with vector argument
+        return TensorMap(reshape(data, length(data)), V)
     end
-    t = TensorMap{eltype(data)}(undef, codom, dom)
-    data2 = reshape(data, (dims(codom)..., dims(dom)...))
-    project_symmetric!(t, data2)
-
-    if !isapprox(data2, convert(Array, t); atol=tol)
+    t = TensorMap{T}(undef, codom, dom)
+    arraydata = reshape(collect(data), arraysize)
+    t = project_symmetric!(t, arraydata)
+    if !isapprox(arraydata, convert(Array, t); atol=tol)
         throw(ArgumentError("Data has non-zero elements at incompatible positions"))
     end
-
     return t
 end
+function TensorMap(data::AbstractArray, codom::TensorSpace{S},
+                   dom::TensorSpace{S}; kwargs...) where {S}
+    return TensorMap(data, codom ← dom; kwargs...)
+end
+function Tensor(data::AbstractArray, codom::TensorSpace; kwargs...)
+    return TensorMap(data, codom ← one(codom); kwargs...)
+end
 
 """
     project_symmetric!(t::TensorMap, data::DenseArray) -> TensorMap
 
 Project the data from a dense array `data` into the tensor map `t`. This function discards
 any data that does not fit the symmetry structure of `t`.
 """
 function project_symmetric!(t::TensorMap, data::DenseArray)
-    if sectortype(t) === Trivial
-        copy!(t.data, reshape(data, size(t.data)))
-        return t
-    end
-
-    for (f₁, f₂) in fusiontrees(t)
-        F = convert(Array, (f₁, f₂))
-        b = zeros(eltype(data), dims(codomain(t), f₁.uncoupled)...,
-                  dims(domain(t), f₂.uncoupled)...)
- szbF = _interleave(size(b), size(F)) - dataslice = sreshape(StridedView(data)[axes(codomain(t), f₁.uncoupled)..., - axes(domain(t), f₂.uncoupled)...], szbF) - # project (can this be done in one go?) - d = inv(dim(f₁.coupled)) - for k in eachindex(b) - b[k] = 1 - projector = _kron(b, F) # probably possible to re-use memory - t[f₁, f₂][k] = dot(projector, dataslice) * d - b[k] = 0 + I = sectortype(t) + if I === Trivial # cannot happen when called from above, but for generality we keep this + copy!(t.data, reshape(data, length(t.data))) + else + for (f₁, f₂) in fusiontrees(t) + F = convert(Array, (f₁, f₂)) + dataslice = sview(data, axes(codomain(t), f₁.uncoupled)..., + axes(domain(t), f₂.uncoupled)...) + if FusionStyle(I) === UniqueFusion() + Fscalar = first(F) # contains a single element + scale!(t[f₁, f₂], dataslice, conj(Fscalar)) + else + subblock = t[f₁, f₂] + szbF = _interleave(size(F), size(subblock)) + indset1 = ntuple(identity, numind(t)) + indset2 = 2 .* indset1 + indset3 = indset2 .- 1 + TensorOperations.tensorcontract!(subblock, + F, ((), indset1), true, + sreshape(dataslice, szbF), (indset3, indset2), false, + (indset1, ()), + inv(dim(f₁.coupled)), false) + end end end - return t end -function TensorMap(data::DenseArray, codom::TensorSpace{S}, - dom::TensorSpace{S}; kwargs...) where {S} - return TensorMap(data, codom ← dom; kwargs...) -end # Efficient copy constructors #----------------------------- -Base.copy(t::TrivialTensorMap) = typeof(t)(copy(t.data), t.codom, t.dom) -Base.copy(t::TensorMap) = typeof(t)(deepcopy(t.data), t.codom, t.dom, t.rowr, t.colr) - -# specializations when data can be re-used -function Base.similar(t::TensorMap, ::Type{TorA}, - P::TensorMapSpace{S}) where {TorA<:MatOrNumber,S} - N₁ = length(codomain(P)) - N₂ = length(domain(P)) - I = sectortype(S) - - # speed up specialized cases - TT = tensormaptype(S, N₁, N₂, TorA) - I === Trivial && return TT(undef, codomain(P), domain(P)) - - if space(t) == P - data = if TorA <: Number - SectorDict(c => similar(b, TorA) for (c, b) in blocks(t)) - else - SectorDict(c => similar(TorA, size(b)) for (c, b) in blocks(t)) - end - return TT(data, codomain(P), domain(P), t.rowr, t.colr) - end - - blocksectoriterator = blocksectors(P) - # try to recycle rowr - if codomain(P) == codomain(t) && all(c -> haskey(t.rowr, c), blocksectoriterator) - if length(t.rowr) == length(blocksectoriterator) - rowr = t.rowr - else - rowr = SectorDict(c => t.rowr[c] for c in blocksectoriterator) - end - rowdims = SectorDict(c => size(block(t, c), 1) for c in blocksectoriterator) - elseif codomain(P) == domain(t) && all(c -> haskey(t.colr, c), blocksectoriterator) - if length(t.colr) == length(blocksectoriterator) - rowr = t.colr - else - rowr = SectorDict(c => t.colr[c] for c in blocksectoriterator) - end - rowdims = SectorDict(c => size(block(t, c), 2) for c in blocksectoriterator) - else - rowr, rowdims = _buildblockstructure(codomain(P), blocksectoriterator) - end - # try to recycle colr - if domain(P) == codomain(t) && all(c -> haskey(t.rowr, c), blocksectoriterator) - if length(t.rowr) == length(blocksectoriterator) - colr = t.rowr - else - colr = SectorDict(c => t.rowr[c] for c in blocksectoriterator) - end - coldims = SectorDict(c => size(block(t, c), 1) for c in blocksectoriterator) - elseif domain(P) == domain(t) && all(c -> haskey(t.colr, c), blocksectoriterator) - if length(t.colr) == length(blocksectoriterator) - colr = t.colr - else - colr = SectorDict(c => t.colr[c] for c in blocksectoriterator) - end - coldims = SectorDict(c 
=> size(block(t, c), 2) for c in blocksectoriterator) - else - colr, coldims = _buildblockstructure(domain(P), blocksectoriterator) - end - M = storagetype(TT) - data = SectorDict{I,M}(c => M(undef, (rowdims[c], coldims[c])) - for c in blocksectoriterator) - return TT(data, codomain(P), domain(P), rowr, colr) -end +Base.copy(t::TensorMap) = typeof(t)(copy(t.data), t.space) function Base.complex(t::AbstractTensorMap) if scalartype(t) <: Complex @@ -547,69 +406,26 @@ function Base.convert(::Type{TensorMap}, d::Dict{Symbol,Any}) end end -# Getting and setting the data -#------------------------------ -hasblock(t::TrivialTensorMap, ::Trivial) = !isempty(t.data) -hasblock(t::TensorMap, s::Sector) = haskey(t.data, s) - -block(t::TrivialTensorMap, ::Trivial) = t.data +# Getting and setting the data at the block level +#------------------------------------------------- function block(t::TensorMap, s::Sector) sectortype(t) == typeof(s) || throw(SectorMismatch()) - A = valtype(t.data) - if haskey(t.data, s) - return t.data[s] - else # at least one of the two matrix dimensions will be zero - return storagetype(t)(undef, (blockdim(codomain(t), s), blockdim(domain(t), s))) + structure = tensorstructure(t).blockstructure + if haskey(structure, s) + (d₁, d₂), r = structure[s] + return reshape(view(t.data, r), (d₁, d₂)) + else # at least one of the two dimensions will be zero + d₁ = blockdim(codomain(t), s) + d₂ = blockdim(domain(t), s) + l = d₁ * d₂ + return reshape(view(storagetype(t)(undef, l), 1:l), (d₁, d₂)) end end -function blocks(t::TrivialTensorMap) - return SingletonDict(Trivial() => t.data) -end -blocks(t::TensorMap) = t.data - -fusiontrees(t::TrivialTensorMap) = ((nothing, nothing),) -fusiontrees(t::TensorMap) = TensorKeyIterator(t.rowr, t.colr) - -""" - Base.getindex(t::TensorMap - sectors::NTuple{N₁+N₂,I}) where {N₁,N₂,I<:Sector} - -> StridedViews.StridedView - t[sectors] - -Return a view into the data slice of `t` corresponding to the splitting - fusion tree pair -with combined uncoupled charges `sectors`. In particular, if `sectors == (s1..., s2...)` -where `s1` and `s2` correspond to the coupled charges in the codomain and domain -respectively, then a `StridedViews.StridedView` of size -`(dims(codomain(t), s1)..., dims(domain(t), s2))` is returned. - -This method is only available for the case where `FusionStyle(I) isa UniqueFusion`, -since it assumes a uniquely defined coupled charge. -""" -@inline function Base.getindex(t::TensorMap, sectors::Tuple{I,Vararg{I}}) where {I<:Sector} - I === sectortype(t) || throw(SectorMismatch("Not a valid sectortype for this tensor.")) - length(sectors) == numind(t) || - throw(ArgumentError("Number of sectors does not match.")) - FusionStyle(I) isa UniqueFusion || - throw(SectorMismatch("Indexing with sectors only possible if unique fusion")) - s1 = TupleTools.getindices(sectors, codomainind(t)) - s2 = map(dual, TupleTools.getindices(sectors, domainind(t))) - c1 = length(s1) == 0 ? one(I) : (length(s1) == 1 ? s1[1] : first(⊗(s1...))) - @boundscheck begin - c2 = length(s2) == 0 ? one(I) : (length(s2) == 1 ? 
s2[1] : first(⊗(s2...))) - c2 == c1 || throw(SectorMismatch("Not a valid sector for this tensor")) - hassector(codomain(t), s1) && hassector(domain(t), s2) - end - f₁ = FusionTree(s1, c1, map(isdual, tuple(codomain(t)...))) - f₂ = FusionTree(s2, c1, map(isdual, tuple(domain(t)...))) - @inbounds begin - return t[f₁, f₂] - end -end -@propagate_inbounds function Base.getindex(t::TensorMap, sectors::Tuple) - return t[map(sectortype(t), sectors)] -end +blocks(t::TensorMap) = SectorDict(c => block(t, c) for c in blocksectors(t)) # TODO: make iterator +# Indexing and getting and setting the data at the subblock level +#----------------------------------------------------------------- """ Base.getindex(t::TensorMap{T,S,N₁,N₂,I}, f₁::FusionTree{I,N₁}, @@ -624,20 +440,31 @@ Return a view into the data slice of `t` corresponding to the splitting - fusion represents the slice of `block(t, c)` whose row indices correspond to `f₁.uncoupled` and column indices correspond to `f₂.uncoupled`. """ -@inline function Base.getindex(t::TensorMap{T,S,N₁,N₂,I}, +@inline function Base.getindex(t::TensorMap{T,S,N₁,N₂}, f₁::FusionTree{I,N₁}, f₂::FusionTree{I,N₂}) where {T,S,N₁,N₂,I<:Sector} - c = f₁.coupled + structure = tensorstructure(t) @boundscheck begin - c == f₂.coupled || throw(SectorMismatch()) - haskey(t.rowr[c], f₁) || throw(SectorMismatch()) - haskey(t.colr[c], f₂) || throw(SectorMismatch()) + haskey(structure.fusiontreeindices, (f₁, f₂)) || throw(SectorMismatch()) end @inbounds begin + i = structure.fusiontreeindices[(f₁, f₂)] + c = f₁.coupled + (d₁, d₂), r = structure.blockstructure[c] + r₁, r₂ = structure.fusiontreeranges[i] d = (dims(codomain(t), f₁.uncoupled)..., dims(domain(t), f₂.uncoupled)...) - return sreshape(StridedView(t.data[c])[t.rowr[c][f₁], t.colr[c][f₂]], d) + return sreshape(sreshape(sview(t.data, r), (d₁, d₂))[r₁, r₂], d) end end +# The following is probably worth special casing for trivial tensors +@inline function Base.getindex(t::TensorMap{T,S,N₁,N₂}, + f₁::FusionTree{Trivial,N₁}, + f₂::FusionTree{Trivial,N₂}) where {T,S,N₁,N₂} + @boundscheck begin + sectortype(t) == Trivial || throw(SectorMismatch()) + end + return sreshape(StridedView(t.data), (dims(codomain(t))..., dims(domain(t))...)) +end """ Base.setindex!(t::TensorMap{T,S,N₁,N₂,I}, @@ -661,48 +488,43 @@ See also [`Base.getindex(::TensorMap{T,S,N₁,N₂,I<:Sector}, ::FusionTree{I<:S return copy!(getindex(t, f₁, f₂), v) end -# For a tensor with trivial symmetry, allow no argument indexing -""" - Base.getindex(t::TrivialTensorMap) - t[] - -Return a view into the data of `t` as a `StridedViews.StridedView` of size -`(dims(codomain(t))..., dims(domain(t))...)`. """ -@inline function Base.getindex(t::TrivialTensorMap) - return sreshape(StridedView(t.data), (dims(codomain(t))..., dims(domain(t))...)) -end -@inline Base.setindex!(t::TrivialTensorMap, v) = copy!(getindex(t), v) - -# For a tensor with trivial symmetry, fusiontrees returns (nothing,nothing) -@inline Base.getindex(t::TrivialTensorMap, ::Nothing, ::Nothing) = getindex(t) -@inline Base.setindex!(t::TrivialTensorMap, v, ::Nothing, ::Nothing) = setindex!(t, v) + Base.getindex(t::TensorMap + sectors::NTuple{N₁+N₂,I}) where {N₁,N₂,I<:Sector} + -> StridedViews.StridedView + t[sectors] -# For a tensor with trivial symmetry, allow direct indexing -""" - Base.getindex(t::TrivialTensorMap, indices::Vararg{Int}) - t[indices] +Return a view into the data slice of `t` corresponding to the splitting - fusion tree pair +with combined uncoupled charges `sectors`. 
In particular, if `sectors == (s₁..., s₂...)` +where `s₁` and `s₂` correspond to the coupled charges in the codomain and domain +respectively, then a `StridedViews.StridedView` of size +`(dims(codomain(t), s₁)..., dims(domain(t), s₂))` is returned. -Return a view into the data slice of `t` corresponding to `indices`, by slicing the -`StridedViews.StridedView` into the full data array. +This method is only available for the case where `FusionStyle(I) isa UniqueFusion`, +since it assumes a uniquely defined coupled charge. """ -@inline function Base.getindex(t::TrivialTensorMap, indices::Vararg{Int}) - data = t[] - @boundscheck checkbounds(data, indices...) - @inbounds v = data[indices...] - return v +@inline function Base.getindex(t::TensorMap, sectors::Tuple{I,Vararg{I}}) where {I<:Sector} + I === sectortype(t) || throw(SectorMismatch("Not a valid sectortype for this tensor.")) + FusionStyle(I) isa UniqueFusion || + throw(SectorMismatch("Indexing with sectors only possible if unique fusion")) + length(sectors) == numind(t) || + throw(ArgumentError("Number of sectors does not match.")) + s₁ = TupleTools.getindices(sectors, codomainind(t)) + s₂ = map(dual, TupleTools.getindices(sectors, domainind(t))) + c1 = length(s₁) == 0 ? one(I) : (length(s₁) == 1 ? s₁[1] : first(⊗(s₁...))) + @boundscheck begin + c2 = length(s₂) == 0 ? one(I) : (length(s₂) == 1 ? s₂[1] : first(⊗(s₂...))) + c2 == c1 || throw(SectorMismatch("Not a valid sector for this tensor")) + hassector(codomain(t), s₁) && hassector(domain(t), s₂) + end + f₁ = FusionTree(s₁, c1, map(isdual, tuple(codomain(t)...))) + f₂ = FusionTree(s₂, c1, map(isdual, tuple(domain(t)...))) + @inbounds begin + return t[f₁, f₂] + end end -""" - Base.setindex!(t::TrivialTensorMap, v, indices::Vararg{Int}) - t[indices] = v - -Assigns `v` to the data slice of `t` corresponding to `indices`. -""" -@inline function Base.setindex!(t::TrivialTensorMap, v, indices::Vararg{Int}) - data = t[] - @boundscheck checkbounds(data, indices...) - @inbounds data[indices...] = v - return v +@propagate_inbounds function Base.getindex(t::TensorMap, sectors::Tuple) + return t[map(sectortype(t), sectors)] end # Show @@ -741,8 +563,7 @@ function Base.real(t::AbstractTensorMap) # that the real/imaginary part of a tensor `t` can be obtained by just taking # real/imaginary part of the degeneracy data. if isreal(sectortype(t)) - realdata = Dict(k => real(v) for (k, v) in blocks(t)) - return TensorMap(realdata, codomain(t), domain(t)) + return TensorMap(real(t.data), codomain(t), domain(t)) else msg = "`real` has not been implemented for `$(typeof(t))`." throw(ArgumentError(msg)) @@ -754,8 +575,7 @@ function Base.imag(t::AbstractTensorMap) # that the real/imaginary part of a tensor `t` can be obtained by just taking # real/imaginary part of the degeneracy data. if isreal(sectortype(t)) - imagdata = Dict(k => imag(v) for (k, v) in blocks(t)) - return TensorMap(imagdata, codomain(t), domain(t)) + return TensorMap(imag(t.data), codomain(t), domain(t)) else msg = "`imag` has not been implemented for `$(typeof(t))`." 
throw(ArgumentError(msg)) @@ -769,14 +589,13 @@ function Base.convert(::Type{TensorMap}, t::AbstractTensorMap) return copy!(TensorMap{scalartype(t)}(undef, space(t)), t) end -function Base.convert(TT::Type{<:TensorMap{T,S,N₁,N₂}}, - t::AbstractTensorMap{<:Any,S,N₁,N₂}) where {T,S,N₁,N₂} +function Base.convert(TT::Type{TensorMap{T,S,N₁,N₂,A}}, + t::AbstractTensorMap{<:Any,S,N₁,N₂}) where {T,S,N₁,N₂,A} if typeof(t) === TT return t else - data = Dict{sectortype(TT),storagetype(TT)}(c => convert(storagetype(TT), b) - for (c, b) in blocks(t)) - return TensorMap(data, codomain(t), domain(t)) + data = convert(A, t.data) + return TensorMap(data, space(t)) end end @@ -784,5 +603,7 @@ function Base.promote_rule(::Type{<:TT₁}, ::Type{<:TT₂}) where {S,N₁,N₂, TT₁<:TensorMap{<:Any,S,N₁,N₂}, TT₂<:TensorMap{<:Any,S,N₁,N₂}} - return tensormaptype(S, N₁, N₂, promote_type(storagetype(TT₁), storagetype(TT₂))) + T = promote_type(scalartype(TT₁), scalartype(TT₂)) + A = promote_type(storagetype(TT₁), storagetype(TT₂)) + return TensorMap{T, S, N₁, N₂, A} end diff --git a/src/tensors/vectorinterface.jl b/src/tensors/vectorinterface.jl index 95c99fb2..bb5608d2 100644 --- a/src/tensors/vectorinterface.jl +++ b/src/tensors/vectorinterface.jl @@ -8,9 +8,7 @@ function VectorInterface.zerovector(t::AbstractTensorMap, ::Type{S}) where {S<:N return zerovector!(similar(t, S)) end function VectorInterface.zerovector!(t::AbstractTensorMap) - for (c, b) in blocks(t) - zerovector!(b) - end + zerovector!(t.data) return t end VectorInterface.zerovector!!(t::AbstractTensorMap) = zerovector!(t) @@ -22,9 +20,7 @@ function VectorInterface.scale(t::AbstractTensorMap, α::Number) return scale!(similar(t, T), t, α) end function VectorInterface.scale!(t::AbstractTensorMap, α::Number) - for (c, b) in blocks(t) - scale!(b, α) - end + scale!(t.data, α) return t end function VectorInterface.scale!!(t::AbstractTensorMap, α::Number) @@ -34,9 +30,7 @@ function VectorInterface.scale!!(t::AbstractTensorMap, α::Number) end function VectorInterface.scale!(ty::AbstractTensorMap, tx::AbstractTensorMap, α::Number) space(ty) == space(tx) || throw(SpaceMismatch("$(space(ty)) ≠ $(space(tx))")) - for c in blocksectors(tx) - scale!(block(ty, c), block(tx, c), α) - end + scale!(ty.data, tx.data, α) return ty end function VectorInterface.scale!!(ty::AbstractTensorMap, tx::AbstractTensorMap, α::Number) @@ -55,14 +49,12 @@ function VectorInterface.add(ty::AbstractTensorMap, tx::AbstractTensorMap, α::Number, β::Number) space(ty) == space(tx) || throw(SpaceMismatch("$(space(ty)) ≠ $(space(tx))")) T = VectorInterface.promote_add(ty, tx, α, β) - return VectorInterface.add!(scale!(similar(ty, T), ty, β), tx, α) + return add!(scale!(similar(ty, T), ty, β), tx, α) end function VectorInterface.add!(ty::AbstractTensorMap, tx::AbstractTensorMap, α::Number, β::Number) space(ty) == space(tx) || throw(SpaceMismatch("$(space(ty)) ≠ $(space(tx))")) - for c in blocksectors(tx) - VectorInterface.add!(block(ty, c), block(tx, c), α, β) - end + add!(ty.data, tx.data, α, β) return ty end function VectorInterface.add!!(ty::AbstractTensorMap, tx::AbstractTensorMap, @@ -70,9 +62,9 @@ function VectorInterface.add!!(ty::AbstractTensorMap, tx::AbstractTensorMap, # spacecheck is done in add(!) 
T = VectorInterface.promote_add(ty, tx, α, β) if T <: scalartype(ty) - return VectorInterface.add!(ty, tx, α, β) + return add!(ty, tx, α, β) else - return VectorInterface.add(ty, tx, α, β) + return add(ty, tx, α, β) end end @@ -84,7 +76,7 @@ function VectorInterface.inner(tx::AbstractTensorMap, ty::AbstractTensorMap) T = VectorInterface.promote_inner(tx, ty) s = zero(T) for c in blocksectors(tx) - s += convert(T, dim(c)) * dot(block(tx, c), block(ty, c)) + s += convert(T, dim(c)) * inner(block(tx, c), block(ty, c)) end return s end diff --git a/test/runtests.jl b/test/runtests.jl index 34037d27..f8fd2b93 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -54,19 +54,19 @@ sectorlist = (Z2Irrep, Z3Irrep, Z4Irrep, Z3Irrep ⊠ Z4Irrep, Z2Irrep ⊠ FibonacciAnyon ⊠ FibonacciAnyon) Ti = time() -include("fusiontrees.jl") -include("spaces.jl") +# include("fusiontrees.jl") +# include("spaces.jl") include("tensors.jl") -include("planar.jl") -include("ad.jl") -include("bugfixes.jl") +# include("planar.jl") +# include("ad.jl") +# include("bugfixes.jl") Tf = time() printstyled("Finished all tests in ", string(round((Tf - Ti) / 60; sigdigits=3)), " minutes."; bold=true, color=Base.info_color()) println() -@testset "Aqua" verbose = true begin - using Aqua - Aqua.test_all(TensorKit) -end +# @testset "Aqua" verbose = true begin +# using Aqua +# Aqua.test_all(TensorKit) +# end diff --git a/test/tensors.jl b/test/tensors.jl index a58b4900..09512364 100644 --- a/test/tensors.jl +++ b/test/tensors.jl @@ -90,7 +90,7 @@ for V in spacelist @test codomain(t) == W @test space(t) == (W ← one(W)) @test domain(t) == one(W) - @test typeof(t) == @constinferred tensormaptype(spacetype(t), 5, 0, T) + @test typeof(t) == TensorMap{T, spacetype(t), 5, 0, Vector{T}} end end @timedtestset "Tensor Dict conversion" begin @@ -101,23 +101,30 @@ for V in spacelist @test t == convert(TensorMap, d) end end - if hasfusiontensor(I) + if hasfusiontensor(I) || I == Trivial @timedtestset "Tensor Array conversion" begin - W = V1 ⊗ V2 ⊗ V3 ← V4 ⊗ V5 - for T in (Int, Float32, ComplexF64) - if T == Int - t = TensorMap{T}(undef, W) - for (_, b) in blocks(t) - rand!(b, -20:20) + W1 = V1 ← one(V1) + W2 = one(V2) ← V2 + W3 = V1 ⊗ V2 ← one(V1) + W4 = V1 ← V2 + W5 = one(V1) ← V1 ⊗ V2 + W6 = V1 ⊗ V2 ⊗ V3 ← V4 ⊗ V5 + for W in (W1, W2, W3, W4, W5, W6) + for T in (Int, Float32, ComplexF64) + if T == Int + t = TensorMap{T}(undef, W) + for (_, b) in blocks(t) + rand!(b, -20:20) + end + else + t = @constinferred randn(T, W) end - else - t = @constinferred randn(T, W) + a = @constinferred convert(Array, t) + b = reshape(a, dim(codomain(W)), dim(domain(W))) + @test t ≈ @constinferred TensorMap(a, W) + @test t ≈ @constinferred TensorMap(b, W) + @test t === @constinferred TensorMap(t.data, W) end - a = @constinferred convert(Array, t) - @test t ≈ @constinferred TensorMap(a, W) - # also test if input is matrix - a2 = reshape(a, prod(dim, codomain(t)), prod(dim, domain(t))) - @test t ≈ @constinferred TensorMap(a2, codomain(t), domain(t)) end end end @@ -149,510 +156,510 @@ for V in spacelist @test dot(t2, t) ≈ conj(dot(t2', t')) @test dot(t2, t) ≈ dot(t', t2') - i1 = @constinferred(isomorphism(Matrix{T}, V1 ⊗ V2, V2 ⊗ V1)) - i2 = @constinferred(isomorphism(Matrix{T}, V2 ⊗ V1, V1 ⊗ V2)) - @test i1 * i2 == @constinferred(id(Matrix{T}, V1 ⊗ V2)) - @test i2 * i1 == @constinferred(id(Matrix{T}, V2 ⊗ V1)) + i1 = @constinferred(isomorphism(T, V1 ⊗ V2, V2 ⊗ V1)) + i2 = @constinferred(isomorphism(Vector{T}, V2 ⊗ V1, V1 ⊗ V2)) + @test i1 * i2 == @constinferred(id(T, V1 
⊗ V2)) + @test i2 * i1 == @constinferred(id(Vector{T}, V2 ⊗ V1)) - w = @constinferred(isometry(Matrix{T}, V1 ⊗ (oneunit(V1) ⊕ oneunit(V1)), + w = @constinferred(isometry(T, V1 ⊗ (oneunit(V1) ⊕ oneunit(V1)), V1)) @test dim(w) == 2 * dim(V1 ← V1) - @test w' * w == id(Matrix{T}, V1) + @test w' * w == id(Vector{T}, V1) @test w * w' == (w * w')^2 end end - if hasfusiontensor(I) - @timedtestset "Basic linear algebra: test via conversion" begin - W = V1 ⊗ V2 ⊗ V3 ← V4 ⊗ V5 - for T in (Float32, ComplexF64) - t = rand(T, W) - t2 = @constinferred rand!(similar(t)) - @test norm(t, 2) ≈ norm(convert(Array, t), 2) - @test dot(t2, t) ≈ dot(convert(Array, t2), convert(Array, t)) - α = rand(T) - @test convert(Array, α * t) ≈ α * convert(Array, t) - @test convert(Array, t + t) ≈ 2 * convert(Array, t) - end - end - @timedtestset "Real and imaginary parts" begin - W = V1 ⊗ V2 - for T in (Float64, ComplexF64, ComplexF32) - t = @constinferred randn(T, W, W) - @test real(convert(Array, t)) == convert(Array, @constinferred real(t)) - @test imag(convert(Array, t)) == convert(Array, @constinferred imag(t)) - end - end - end - @timedtestset "Tensor conversion" begin - W = V1 ⊗ V2 - t = @constinferred randn(W ← W) - @test typeof(convert(TensorMap, t')) == typeof(t) - tc = complex(t) - @test convert(typeof(tc), t) == tc - @test typeof(convert(typeof(tc), t)) == typeof(tc) - @test typeof(convert(typeof(tc), t')) == typeof(tc) - @test Base.promote_typeof(t, tc) == typeof(tc) - @test Base.promote_typeof(tc, t) == typeof(tc + t) - end - @timedtestset "diag/diagm" begin - W = V1 ⊗ V2 ⊗ V3 ← V4 ⊗ V5 - t = randn(ComplexF64, W) - d = LinearAlgebra.diag(t) - D = LinearAlgebra.diagm(codomain(t), domain(t), d) - @test LinearAlgebra.isdiag(D) - @test LinearAlgebra.diag(D) == d - end - @timedtestset "Permutations: test via inner product invariance" begin - W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 - t = rand(ComplexF64, W) - t′ = randn!(similar(t)) - for k in 0:5 - for p in permutations(1:5) - p1 = ntuple(n -> p[n], k) - p2 = ntuple(n -> p[k + n], 5 - k) - t2 = @constinferred permute(t, (p1, p2)) - @test norm(t2) ≈ norm(t) - t2′ = permute(t′, (p1, p2)) - @test dot(t2′, t2) ≈ dot(t′, t) ≈ dot(transpose(t2′), transpose(t2)) - end + # if hasfusiontensor(I) + # @timedtestset "Basic linear algebra: test via conversion" begin + # W = V1 ⊗ V2 ⊗ V3 ← V4 ⊗ V5 + # for T in (Float32, ComplexF64) + # t = rand(T, W) + # t2 = @constinferred rand!(similar(t)) + # @test norm(t, 2) ≈ norm(convert(Array, t), 2) + # @test dot(t2, t) ≈ dot(convert(Array, t2), convert(Array, t)) + # α = rand(T) + # @test convert(Array, α * t) ≈ α * convert(Array, t) + # @test convert(Array, t + t) ≈ 2 * convert(Array, t) + # end + # end + # @timedtestset "Real and imaginary parts" begin + # W = V1 ⊗ V2 + # for T in (Float64, ComplexF64, ComplexF32) + # t = @constinferred randn(T, W, W) + # @test real(convert(Array, t)) == convert(Array, @constinferred real(t)) + # @test imag(convert(Array, t)) == convert(Array, @constinferred imag(t)) + # end + # end + # end + # @timedtestset "Tensor conversion" begin + # W = V1 ⊗ V2 + # t = @constinferred randn(W ← W) + # @test typeof(convert(TensorMap, t')) == typeof(t) + # tc = complex(t) + # @test convert(typeof(tc), t) == tc + # @test typeof(convert(typeof(tc), t)) == typeof(tc) + # @test typeof(convert(typeof(tc), t')) == typeof(tc) + # @test Base.promote_typeof(t, tc) == typeof(tc) + # @test Base.promote_typeof(tc, t) == typeof(tc + t) + # end + # @timedtestset "diag/diagm" begin + # W = V1 ⊗ V2 ⊗ V3 ← V4 ⊗ V5 + # t = randn(ComplexF64, W) + 
# d = LinearAlgebra.diag(t) + # D = LinearAlgebra.diagm(codomain(t), domain(t), d) + # @test LinearAlgebra.isdiag(D) + # @test LinearAlgebra.diag(D) == d + # end + # @timedtestset "Permutations: test via inner product invariance" begin + # W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 + # t = rand(ComplexF64, W) + # t′ = randn!(similar(t)) + # for k in 0:5 + # for p in permutations(1:5) + # p1 = ntuple(n -> p[n], k) + # p2 = ntuple(n -> p[k + n], 5 - k) + # t2 = @constinferred permute(t, (p1, p2)) + # @test norm(t2) ≈ norm(t) + # t2′ = permute(t′, (p1, p2)) + # @test dot(t2′, t2) ≈ dot(t′, t) ≈ dot(transpose(t2′), transpose(t2)) + # end - t3 = VERSION < v"1.7" ? repartition(t, k) : - @constinferred repartition(t, $k) - @test norm(t3) ≈ norm(t) - t3′ = @constinferred repartition!(similar(t3), t′) - @test norm(t3′) ≈ norm(t′) - @test dot(t′, t) ≈ dot(t3′, t3) - end - end - if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) - @timedtestset "Permutations: test via conversion" begin - W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 - t = rand(ComplexF64, W) - a = convert(Array, t) - for k in 0:5 - for p in permutations(1:5) - p1 = ntuple(n -> p[n], k) - p2 = ntuple(n -> p[k + n], 5 - k) - t2 = permute(t, (p1, p2)) - a2 = convert(Array, t2) - @test a2 ≈ permutedims(a, (p1..., p2...)) - @test convert(Array, transpose(t2)) ≈ - permutedims(a2, (5, 4, 3, 2, 1)) - end + # t3 = VERSION < v"1.7" ? repartition(t, k) : + # @constinferred repartition(t, $k) + # @test norm(t3) ≈ norm(t) + # t3′ = @constinferred repartition!(similar(t3), t′) + # @test norm(t3′) ≈ norm(t′) + # @test dot(t′, t) ≈ dot(t3′, t3) + # end + # end + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # @timedtestset "Permutations: test via conversion" begin + # W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 + # t = rand(ComplexF64, W) + # a = convert(Array, t) + # for k in 0:5 + # for p in permutations(1:5) + # p1 = ntuple(n -> p[n], k) + # p2 = ntuple(n -> p[k + n], 5 - k) + # t2 = permute(t, (p1, p2)) + # a2 = convert(Array, t2) + # @test a2 ≈ permutedims(a, (p1..., p2...)) + # @test convert(Array, transpose(t2)) ≈ + # permutedims(a2, (5, 4, 3, 2, 1)) + # end - t3 = repartition(t, k) - a3 = convert(Array, t3) - @test a3 ≈ permutedims(a, - (ntuple(identity, k)..., - reverse(ntuple(i -> i + k, 5 - k))...)) - end - end - end - @timedtestset "Full trace: test self-consistency" begin - t = rand(ComplexF64, V1 ⊗ V2' ⊗ V2 ⊗ V1') - t2 = permute(t, ((1, 2), (4, 3))) - s = @constinferred tr(t2) - @test conj(s) ≈ tr(t2') - if !isdual(V1) - t2 = twist!(t2, 1) - end - if isdual(V2) - t2 = twist!(t2, 2) - end - ss = tr(t2) - @tensor s2 = t[a, b, b, a] - @tensor t3[a, b] := t[a, c, c, b] - @tensor s3 = t3[a, a] - @test ss ≈ s2 - @test ss ≈ s3 - end - @timedtestset "Partial trace: test self-consistency" begin - t = rand(ComplexF64, V1 ⊗ V2' ⊗ V3 ⊗ V2 ⊗ V1' ⊗ V3') - @tensor t2[a, b] := t[c, d, b, d, c, a] - @tensor t4[a, b, c, d] := t[d, e, b, e, c, a] - @tensor t5[a, b] := t4[a, b, c, c] - @test t2 ≈ t5 - end - if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) - @timedtestset "Trace: test via conversion" begin - t = rand(ComplexF64, V1 ⊗ V2' ⊗ V3 ⊗ V2 ⊗ V1' ⊗ V3') - @tensor t2[a, b] := t[c, d, b, d, c, a] - @tensor t3[a, b] := convert(Array, t)[c, d, b, d, c, a] - @test t3 ≈ convert(Array, t2) - end - end - @timedtestset "Trace and contraction" begin - t1 = rand(ComplexF64, V1 ⊗ V2 ⊗ V3) - t2 = rand(ComplexF64, V2' ⊗ V4 ⊗ V1') - t3 = t1 ⊗ t2 - @tensor ta[a, b] := t1[x, y, a] * t2[y, b, x] - @tensor tb[a, b] := t3[x, y, a, y, b, x] - @test ta ≈ tb - end - if BraidingStyle(I) isa Bosonic && 
hasfusiontensor(I) - @timedtestset "Tensor contraction: test via conversion" begin - A1 = randn(ComplexF64, V1' * V2', V3') - A2 = randn(ComplexF64, V3 * V4, V5) - rhoL = randn(ComplexF64, V1, V1) - rhoR = randn(ComplexF64, V5, V5)' # test adjoint tensor - H = randn(ComplexF64, V2 * V4, V2 * V4) - @tensor HrA12[a, s1, s2, c] := rhoL[a, a'] * conj(A1[a', t1, b]) * - A2[b, t2, c'] * rhoR[c', c] * - H[s1, s2, t1, t2] + # t3 = repartition(t, k) + # a3 = convert(Array, t3) + # @test a3 ≈ permutedims(a, + # (ntuple(identity, k)..., + # reverse(ntuple(i -> i + k, 5 - k))...)) + # end + # end + # end + # @timedtestset "Full trace: test self-consistency" begin + # t = rand(ComplexF64, V1 ⊗ V2' ⊗ V2 ⊗ V1') + # t2 = permute(t, ((1, 2), (4, 3))) + # s = @constinferred tr(t2) + # @test conj(s) ≈ tr(t2') + # if !isdual(V1) + # t2 = twist!(t2, 1) + # end + # if isdual(V2) + # t2 = twist!(t2, 2) + # end + # ss = tr(t2) + # @tensor s2 = t[a, b, b, a] + # @tensor t3[a, b] := t[a, c, c, b] + # @tensor s3 = t3[a, a] + # @test ss ≈ s2 + # @test ss ≈ s3 + # end + # @timedtestset "Partial trace: test self-consistency" begin + # t = rand(ComplexF64, V1 ⊗ V2' ⊗ V3 ⊗ V2 ⊗ V1' ⊗ V3') + # @tensor t2[a, b] := t[c, d, b, d, c, a] + # @tensor t4[a, b, c, d] := t[d, e, b, e, c, a] + # @tensor t5[a, b] := t4[a, b, c, c] + # @test t2 ≈ t5 + # end + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # @timedtestset "Trace: test via conversion" begin + # t = rand(ComplexF64, V1 ⊗ V2' ⊗ V3 ⊗ V2 ⊗ V1' ⊗ V3') + # @tensor t2[a, b] := t[c, d, b, d, c, a] + # @tensor t3[a, b] := convert(Array, t)[c, d, b, d, c, a] + # @test t3 ≈ convert(Array, t2) + # end + # end + # @timedtestset "Trace and contraction" begin + # t1 = rand(ComplexF64, V1 ⊗ V2 ⊗ V3) + # t2 = rand(ComplexF64, V2' ⊗ V4 ⊗ V1') + # t3 = t1 ⊗ t2 + # @tensor ta[a, b] := t1[x, y, a] * t2[y, b, x] + # @tensor tb[a, b] := t3[x, y, a, y, b, x] + # @test ta ≈ tb + # end + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # @timedtestset "Tensor contraction: test via conversion" begin + # A1 = randn(ComplexF64, V1' * V2', V3') + # A2 = randn(ComplexF64, V3 * V4, V5) + # rhoL = randn(ComplexF64, V1, V1) + # rhoR = randn(ComplexF64, V5, V5)' # test adjoint tensor + # H = randn(ComplexF64, V2 * V4, V2 * V4) + # @tensor HrA12[a, s1, s2, c] := rhoL[a, a'] * conj(A1[a', t1, b]) * + # A2[b, t2, c'] * rhoR[c', c] * + # H[s1, s2, t1, t2] - @tensor HrA12array[a, s1, s2, c] := convert(Array, rhoL)[a, a'] * - conj(convert(Array, A1)[a', t1, b]) * - convert(Array, A2)[b, t2, c'] * - convert(Array, rhoR)[c', c] * - convert(Array, H)[s1, s2, t1, t2] + # @tensor HrA12array[a, s1, s2, c] := convert(Array, rhoL)[a, a'] * + # conj(convert(Array, A1)[a', t1, b]) * + # convert(Array, A2)[b, t2, c'] * + # convert(Array, rhoR)[c', c] * + # convert(Array, H)[s1, s2, t1, t2] - @test HrA12array ≈ convert(Array, HrA12) - end - end - @timedtestset "Multiplication and inverse: test compatibility" begin - W1 = V1 ⊗ V2 ⊗ V3 - W2 = V4 ⊗ V5 - for T in (Float64, ComplexF64) - t1 = rand(T, W1, W1) - t2 = rand(T, W2, W2) - t = rand(T, W1, W2) - @test t1 * (t1 \ t) ≈ t - @test (t / t2) * t2 ≈ t - @test t1 \ one(t1) ≈ inv(t1) - @test one(t1) / t1 ≈ pinv(t1) - @test_throws SpaceMismatch inv(t) - @test_throws SpaceMismatch t2 \ t - @test_throws SpaceMismatch t / t1 - tp = pinv(t) * t - @test tp ≈ tp * tp - end - end - if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) - @timedtestset "Multiplication and inverse: test via conversion" begin - W1 = V1 ⊗ V2 ⊗ V3 - W2 = V4 ⊗ V5 - for T in (Float32, 
Float64, ComplexF32, ComplexF64) - t1 = rand(T, W1, W1) - t2 = rand(T, W2, W2) - t = rand(T, W1, W2) - d1 = dim(W1) - d2 = dim(W2) - At1 = reshape(convert(Array, t1), d1, d1) - At2 = reshape(convert(Array, t2), d2, d2) - At = reshape(convert(Array, t), d1, d2) - @test reshape(convert(Array, t1 * t), d1, d2) ≈ At1 * At - @test reshape(convert(Array, t1' * t), d1, d2) ≈ At1' * At - @test reshape(convert(Array, t2 * t'), d2, d1) ≈ At2 * At' - @test reshape(convert(Array, t2' * t'), d2, d1) ≈ At2' * At' + # @test HrA12array ≈ convert(Array, HrA12) + # end + # end + # @timedtestset "Multiplication and inverse: test compatibility" begin + # W1 = V1 ⊗ V2 ⊗ V3 + # W2 = V4 ⊗ V5 + # for T in (Float64, ComplexF64) + # t1 = rand(T, W1, W1) + # t2 = rand(T, W2, W2) + # t = rand(T, W1, W2) + # @test t1 * (t1 \ t) ≈ t + # @test (t / t2) * t2 ≈ t + # @test t1 \ one(t1) ≈ inv(t1) + # @test one(t1) / t1 ≈ pinv(t1) + # @test_throws SpaceMismatch inv(t) + # @test_throws SpaceMismatch t2 \ t + # @test_throws SpaceMismatch t / t1 + # tp = pinv(t) * t + # @test tp ≈ tp * tp + # end + # end + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # @timedtestset "Multiplication and inverse: test via conversion" begin + # W1 = V1 ⊗ V2 ⊗ V3 + # W2 = V4 ⊗ V5 + # for T in (Float32, Float64, ComplexF32, ComplexF64) + # t1 = rand(T, W1, W1) + # t2 = rand(T, W2, W2) + # t = rand(T, W1, W2) + # d1 = dim(W1) + # d2 = dim(W2) + # At1 = reshape(convert(Array, t1), d1, d1) + # At2 = reshape(convert(Array, t2), d2, d2) + # At = reshape(convert(Array, t), d1, d2) + # @test reshape(convert(Array, t1 * t), d1, d2) ≈ At1 * At + # @test reshape(convert(Array, t1' * t), d1, d2) ≈ At1' * At + # @test reshape(convert(Array, t2 * t'), d2, d1) ≈ At2 * At' + # @test reshape(convert(Array, t2' * t'), d2, d1) ≈ At2' * At' - @test reshape(convert(Array, inv(t1)), d1, d1) ≈ inv(At1) - @test reshape(convert(Array, pinv(t)), d2, d1) ≈ pinv(At) + # @test reshape(convert(Array, inv(t1)), d1, d1) ≈ inv(At1) + # @test reshape(convert(Array, pinv(t)), d2, d1) ≈ pinv(At) - if T == Float32 || T == ComplexF32 - continue - end + # if T == Float32 || T == ComplexF32 + # continue + # end - @test reshape(convert(Array, t1 \ t), d1, d2) ≈ At1 \ At - @test reshape(convert(Array, t1' \ t), d1, d2) ≈ At1' \ At - @test reshape(convert(Array, t2 \ t'), d2, d1) ≈ At2 \ At' - @test reshape(convert(Array, t2' \ t'), d2, d1) ≈ At2' \ At' + # @test reshape(convert(Array, t1 \ t), d1, d2) ≈ At1 \ At + # @test reshape(convert(Array, t1' \ t), d1, d2) ≈ At1' \ At + # @test reshape(convert(Array, t2 \ t'), d2, d1) ≈ At2 \ At' + # @test reshape(convert(Array, t2' \ t'), d2, d1) ≈ At2' \ At' - @test reshape(convert(Array, t2 / t), d2, d1) ≈ At2 / At - @test reshape(convert(Array, t2' / t), d2, d1) ≈ At2' / At - @test reshape(convert(Array, t1 / t'), d1, d2) ≈ At1 / At' - @test reshape(convert(Array, t1' / t'), d1, d2) ≈ At1' / At' - end - end - end - @timedtestset "Factorization" begin - W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 - for T in (Float32, ComplexF64) - # Test both a normal tensor and an adjoint one. 
- ts = (rand(T, W), rand(T, W)') - for t in ts - @testset "leftorth with $alg" for alg in - (TensorKit.QR(), TensorKit.QRpos(), - TensorKit.QL(), TensorKit.QLpos(), - TensorKit.Polar(), TensorKit.SVD(), - TensorKit.SDD()) - Q, R = @constinferred leftorth(t, ((3, 4, 2), (1, 5)); alg=alg) - QdQ = Q' * Q - @test QdQ ≈ one(QdQ) - @test Q * R ≈ permute(t, ((3, 4, 2), (1, 5))) - if alg isa Polar - @test isposdef(R) - @test domain(R) == codomain(R) == space(t, 1)' ⊗ space(t, 5)' - end - end - @testset "leftnull with $alg" for alg in - (TensorKit.QR(), TensorKit.SVD(), - TensorKit.SDD()) - N = @constinferred leftnull(t, ((3, 4, 2), (1, 5)); alg=alg) - NdN = N' * N - @test NdN ≈ one(NdN) - @test norm(N' * permute(t, ((3, 4, 2), (1, 5)))) < - 100 * eps(norm(t)) - end - @testset "rightorth with $alg" for alg in - (TensorKit.RQ(), TensorKit.RQpos(), - TensorKit.LQ(), TensorKit.LQpos(), - TensorKit.Polar(), TensorKit.SVD(), - TensorKit.SDD()) - L, Q = @constinferred rightorth(t, ((3, 4), (2, 1, 5)); alg=alg) - QQd = Q * Q' - @test QQd ≈ one(QQd) - @test L * Q ≈ permute(t, ((3, 4), (2, 1, 5))) - if alg isa Polar - @test isposdef(L) - @test domain(L) == codomain(L) == space(t, 3) ⊗ space(t, 4) - end - end - @testset "rightnull with $alg" for alg in - (TensorKit.LQ(), TensorKit.SVD(), - TensorKit.SDD()) - M = @constinferred rightnull(t, ((3, 4), (2, 1, 5)); alg=alg) - MMd = M * M' - @test MMd ≈ one(MMd) - @test norm(permute(t, ((3, 4), (2, 1, 5))) * M') < - 100 * eps(norm(t)) - end - @testset "tsvd with $alg" for alg in (TensorKit.SVD(), TensorKit.SDD()) - U, S, V = @constinferred tsvd(t, ((3, 4, 2), (1, 5)); alg=alg) - UdU = U' * U - @test UdU ≈ one(UdU) - VVd = V * V' - @test VVd ≈ one(VVd) - t2 = permute(t, ((3, 4, 2), (1, 5))) - @test U * S * V ≈ t2 + # @test reshape(convert(Array, t2 / t), d2, d1) ≈ At2 / At + # @test reshape(convert(Array, t2' / t), d2, d1) ≈ At2' / At + # @test reshape(convert(Array, t1 / t'), d1, d2) ≈ At1 / At' + # @test reshape(convert(Array, t1' / t'), d1, d2) ≈ At1' / At' + # end + # end + # end + # @timedtestset "Factorization" begin + # W = V1 ⊗ V2 ⊗ V3 ⊗ V4 ⊗ V5 + # for T in (Float32, ComplexF64) + # # Test both a normal tensor and an adjoint one. 
+ # ts = (rand(T, W), rand(T, W)') + # for t in ts + # @testset "leftorth with $alg" for alg in + # (TensorKit.QR(), TensorKit.QRpos(), + # TensorKit.QL(), TensorKit.QLpos(), + # TensorKit.Polar(), TensorKit.SVD(), + # TensorKit.SDD()) + # Q, R = @constinferred leftorth(t, ((3, 4, 2), (1, 5)); alg=alg) + # QdQ = Q' * Q + # @test QdQ ≈ one(QdQ) + # @test Q * R ≈ permute(t, ((3, 4, 2), (1, 5))) + # if alg isa Polar + # @test isposdef(R) + # @test domain(R) == codomain(R) == space(t, 1)' ⊗ space(t, 5)' + # end + # end + # @testset "leftnull with $alg" for alg in + # (TensorKit.QR(), TensorKit.SVD(), + # TensorKit.SDD()) + # N = @constinferred leftnull(t, ((3, 4, 2), (1, 5)); alg=alg) + # NdN = N' * N + # @test NdN ≈ one(NdN) + # @test norm(N' * permute(t, ((3, 4, 2), (1, 5)))) < + # 100 * eps(norm(t)) + # end + # @testset "rightorth with $alg" for alg in + # (TensorKit.RQ(), TensorKit.RQpos(), + # TensorKit.LQ(), TensorKit.LQpos(), + # TensorKit.Polar(), TensorKit.SVD(), + # TensorKit.SDD()) + # L, Q = @constinferred rightorth(t, ((3, 4), (2, 1, 5)); alg=alg) + # QQd = Q * Q' + # @test QQd ≈ one(QQd) + # @test L * Q ≈ permute(t, ((3, 4), (2, 1, 5))) + # if alg isa Polar + # @test isposdef(L) + # @test domain(L) == codomain(L) == space(t, 3) ⊗ space(t, 4) + # end + # end + # @testset "rightnull with $alg" for alg in + # (TensorKit.LQ(), TensorKit.SVD(), + # TensorKit.SDD()) + # M = @constinferred rightnull(t, ((3, 4), (2, 1, 5)); alg=alg) + # MMd = M * M' + # @test MMd ≈ one(MMd) + # @test norm(permute(t, ((3, 4), (2, 1, 5))) * M') < + # 100 * eps(norm(t)) + # end + # @testset "tsvd with $alg" for alg in (TensorKit.SVD(), TensorKit.SDD()) + # U, S, V = @constinferred tsvd(t, ((3, 4, 2), (1, 5)); alg=alg) + # UdU = U' * U + # @test UdU ≈ one(UdU) + # VVd = V * V' + # @test VVd ≈ one(VVd) + # t2 = permute(t, ((3, 4, 2), (1, 5))) + # @test U * S * V ≈ t2 - s = LinearAlgebra.svdvals(t2) - s′ = LinearAlgebra.diag(S) - for (c, b) in s - @test b ≈ s′[c] - end - end - end - @testset "empty tensor" begin - t = randn(T, V1 ⊗ V2, typeof(V1)()) - @testset "leftorth with $alg" for alg in - (TensorKit.QR(), TensorKit.QRpos(), - TensorKit.QL(), TensorKit.QLpos(), - TensorKit.Polar(), TensorKit.SVD(), - TensorKit.SDD()) - Q, R = @constinferred leftorth(t; alg=alg) - @test Q == t - @test dim(Q) == dim(R) == 0 - end - @testset "leftnull with $alg" for alg in - (TensorKit.QR(), TensorKit.SVD(), - TensorKit.SDD()) - N = @constinferred leftnull(t; alg=alg) - @test N' * N ≈ id(domain(N)) - @test N * N' ≈ id(codomain(N)) - end - @testset "rightorth with $alg" for alg in - (TensorKit.RQ(), TensorKit.RQpos(), - TensorKit.LQ(), TensorKit.LQpos(), - TensorKit.Polar(), TensorKit.SVD(), - TensorKit.SDD()) - L, Q = @constinferred rightorth(copy(t'); alg=alg) - @test Q == t' - @test dim(Q) == dim(L) == 0 - end - @testset "rightnull with $alg" for alg in - (TensorKit.LQ(), TensorKit.SVD(), - TensorKit.SDD()) - M = @constinferred rightnull(copy(t'); alg=alg) - @test M * M' ≈ id(codomain(M)) - @test M' * M ≈ id(domain(M)) - end - @testset "tsvd with $alg" for alg in (TensorKit.SVD(), TensorKit.SDD()) - U, S, V = @constinferred tsvd(t; alg=alg) - @test U == t - @test dim(U) == dim(S) == dim(V) - end - end + # s = LinearAlgebra.svdvals(t2) + # s′ = LinearAlgebra.diag(S) + # for (c, b) in s + # @test b ≈ s′[c] + # end + # end + # end + # @testset "empty tensor" begin + # t = randn(T, V1 ⊗ V2, typeof(V1)()) + # @testset "leftorth with $alg" for alg in + # (TensorKit.QR(), TensorKit.QRpos(), + # TensorKit.QL(), TensorKit.QLpos(), + 
# TensorKit.Polar(), TensorKit.SVD(), + # TensorKit.SDD()) + # Q, R = @constinferred leftorth(t; alg=alg) + # @test Q == t + # @test dim(Q) == dim(R) == 0 + # end + # @testset "leftnull with $alg" for alg in + # (TensorKit.QR(), TensorKit.SVD(), + # TensorKit.SDD()) + # N = @constinferred leftnull(t; alg=alg) + # @test N' * N ≈ id(domain(N)) + # @test N * N' ≈ id(codomain(N)) + # end + # @testset "rightorth with $alg" for alg in + # (TensorKit.RQ(), TensorKit.RQpos(), + # TensorKit.LQ(), TensorKit.LQpos(), + # TensorKit.Polar(), TensorKit.SVD(), + # TensorKit.SDD()) + # L, Q = @constinferred rightorth(copy(t'); alg=alg) + # @test Q == t' + # @test dim(Q) == dim(L) == 0 + # end + # @testset "rightnull with $alg" for alg in + # (TensorKit.LQ(), TensorKit.SVD(), + # TensorKit.SDD()) + # M = @constinferred rightnull(copy(t'); alg=alg) + # @test M * M' ≈ id(codomain(M)) + # @test M' * M ≈ id(domain(M)) + # end + # @testset "tsvd with $alg" for alg in (TensorKit.SVD(), TensorKit.SDD()) + # U, S, V = @constinferred tsvd(t; alg=alg) + # @test U == t + # @test dim(U) == dim(S) == dim(V) + # end + # end - t = rand(T, V1 ⊗ V1' ⊗ V2 ⊗ V2') - @testset "eig and isposdef" begin - D, V = eigen(t, ((1, 3), (2, 4))) - t2 = permute(t, ((1, 3), (2, 4))) - @test t2 * V ≈ V * D + # t = rand(T, V1 ⊗ V1' ⊗ V2 ⊗ V2') + # @testset "eig and isposdef" begin + # D, V = eigen(t, ((1, 3), (2, 4))) + # t2 = permute(t, ((1, 3), (2, 4))) + # @test t2 * V ≈ V * D - d = LinearAlgebra.eigvals(t2; sortby=nothing) - d′ = LinearAlgebra.diag(D) - for (c, b) in d - @test b ≈ d′[c] - end + # d = LinearAlgebra.eigvals(t2; sortby=nothing) + # d′ = LinearAlgebra.diag(D) + # for (c, b) in d + # @test b ≈ d′[c] + # end - # Somehow moving these test before the previous one gives rise to errors - # with T=Float32 on x86 platforms. Is this an OpenBLAS issue? - VdV = V' * V - VdV = (VdV + VdV') / 2 - @test isposdef(VdV) + # # Somehow moving these test before the previous one gives rise to errors + # # with T=Float32 on x86 platforms. Is this an OpenBLAS issue? + # VdV = V' * V + # VdV = (VdV + VdV') / 2 + # @test isposdef(VdV) - @test !isposdef(t2) # unlikely for non-hermitian map - t2 = (t2 + t2') - D, V = eigen(t2) - VdV = V' * V - @test VdV ≈ one(VdV) - D̃, Ṽ = @constinferred eigh(t2) - @test D ≈ D̃ - @test V ≈ Ṽ - λ = minimum(minimum(real(LinearAlgebra.diag(b))) - for (c, b) in blocks(D)) - @test isposdef(t2) == isposdef(λ) - @test isposdef(t2 - λ * one(t2) + 0.1 * one(t2)) - @test !isposdef(t2 - λ * one(t2) - 0.1 * one(t2)) - end - end - end - @timedtestset "Tensor truncation" begin - for T in (Float32, ComplexF64) - for p in (1, 2, 3, Inf) - # Test both a normal tensor and an adjoint one. 
- ts = (randn(T, V1 ⊗ V2 ⊗ V3, V4 ⊗ V5), - randn(T, V4 ⊗ V5, V1 ⊗ V2 ⊗ V3)') - for t in ts - U₀, S₀, V₀, = tsvd(t) - t = rmul!(t, 1 / norm(S₀, p)) - U, S, V, ϵ = @constinferred tsvd(t; trunc=truncerr(5e-1), p=p) - # @show p, ϵ - # @show domain(S) - # @test min(space(S,1), space(S₀,1)) != space(S₀,1) - U′, S′, V′, ϵ′ = tsvd(t; trunc=truncerr(nextfloat(ϵ)), p=p) - @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) - U′, S′, V′, ϵ′ = tsvd(t; trunc=truncdim(ceil(Int, dim(domain(S)))), - p=p) - @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) - U′, S′, V′, ϵ′ = tsvd(t; trunc=truncspace(space(S, 1)), p=p) - @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) - # results with truncationcutoff cannot be compared because they don't take degeneracy into account, and thus truncate differently - U, S, V, ϵ = tsvd(t; trunc=truncbelow(1 / dim(domain(S₀))), p=p) - # @show p, ϵ - # @show domain(S) - # @test min(space(S,1), space(S₀,1)) != space(S₀,1) - U′, S′, V′, ϵ′ = tsvd(t; trunc=truncspace(space(S, 1)), p=p) - @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) - end - end - end - end - if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) - @timedtestset "Tensor functions" begin - W = V1 ⊗ V2 - for T in (Float64, ComplexF64) - t = randn(T, W, W) - s = dim(W) - expt = @constinferred exp(t) - @test reshape(convert(Array, expt), (s, s)) ≈ - exp(reshape(convert(Array, t), (s, s))) + # @test !isposdef(t2) # unlikely for non-hermitian map + # t2 = (t2 + t2') + # D, V = eigen(t2) + # VdV = V' * V + # @test VdV ≈ one(VdV) + # D̃, Ṽ = @constinferred eigh(t2) + # @test D ≈ D̃ + # @test V ≈ Ṽ + # λ = minimum(minimum(real(LinearAlgebra.diag(b))) + # for (c, b) in blocks(D)) + # @test isposdef(t2) == isposdef(λ) + # @test isposdef(t2 - λ * one(t2) + 0.1 * one(t2)) + # @test !isposdef(t2 - λ * one(t2) - 0.1 * one(t2)) + # end + # end + # end + # @timedtestset "Tensor truncation" begin + # for T in (Float32, ComplexF64) + # for p in (1, 2, 3, Inf) + # # Test both a normal tensor and an adjoint one. 
+ # ts = (randn(T, V1 ⊗ V2 ⊗ V3, V4 ⊗ V5), + # randn(T, V4 ⊗ V5, V1 ⊗ V2 ⊗ V3)') + # for t in ts + # U₀, S₀, V₀, = tsvd(t) + # t = rmul!(t, 1 / norm(S₀, p)) + # U, S, V, ϵ = @constinferred tsvd(t; trunc=truncerr(5e-1), p=p) + # # @show p, ϵ + # # @show domain(S) + # # @test min(space(S,1), space(S₀,1)) != space(S₀,1) + # U′, S′, V′, ϵ′ = tsvd(t; trunc=truncerr(nextfloat(ϵ)), p=p) + # @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) + # U′, S′, V′, ϵ′ = tsvd(t; trunc=truncdim(ceil(Int, dim(domain(S)))), + # p=p) + # @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) + # U′, S′, V′, ϵ′ = tsvd(t; trunc=truncspace(space(S, 1)), p=p) + # @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) + # # results with truncationcutoff cannot be compared because they don't take degeneracy into account, and thus truncate differently + # U, S, V, ϵ = tsvd(t; trunc=truncbelow(1 / dim(domain(S₀))), p=p) + # # @show p, ϵ + # # @show domain(S) + # # @test min(space(S,1), space(S₀,1)) != space(S₀,1) + # U′, S′, V′, ϵ′ = tsvd(t; trunc=truncspace(space(S, 1)), p=p) + # @test (U, S, V, ϵ) == (U′, S′, V′, ϵ′) + # end + # end + # end + # end + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # @timedtestset "Tensor functions" begin + # W = V1 ⊗ V2 + # for T in (Float64, ComplexF64) + # t = randn(T, W, W) + # s = dim(W) + # expt = @constinferred exp(t) + # @test reshape(convert(Array, expt), (s, s)) ≈ + # exp(reshape(convert(Array, t), (s, s))) - @test (@constinferred sqrt(t))^2 ≈ t - @test reshape(convert(Array, sqrt(t^2)), (s, s)) ≈ - sqrt(reshape(convert(Array, t^2), (s, s))) + # @test (@constinferred sqrt(t))^2 ≈ t + # @test reshape(convert(Array, sqrt(t^2)), (s, s)) ≈ + # sqrt(reshape(convert(Array, t^2), (s, s))) - @test exp(@constinferred log(expt)) ≈ expt - @test reshape(convert(Array, log(expt)), (s, s)) ≈ - log(reshape(convert(Array, expt), (s, s))) + # @test exp(@constinferred log(expt)) ≈ expt + # @test reshape(convert(Array, log(expt)), (s, s)) ≈ + # log(reshape(convert(Array, expt), (s, s))) - @test (@constinferred cos(t))^2 + (@constinferred sin(t))^2 ≈ id(W) - @test (@constinferred tan(t)) ≈ sin(t) / cos(t) - @test (@constinferred cot(t)) ≈ cos(t) / sin(t) - @test (@constinferred cosh(t))^2 - (@constinferred sinh(t))^2 ≈ id(W) - @test (@constinferred tanh(t)) ≈ sinh(t) / cosh(t) - @test (@constinferred coth(t)) ≈ cosh(t) / sinh(t) + # @test (@constinferred cos(t))^2 + (@constinferred sin(t))^2 ≈ id(W) + # @test (@constinferred tan(t)) ≈ sin(t) / cos(t) + # @test (@constinferred cot(t)) ≈ cos(t) / sin(t) + # @test (@constinferred cosh(t))^2 - (@constinferred sinh(t))^2 ≈ id(W) + # @test (@constinferred tanh(t)) ≈ sinh(t) / cosh(t) + # @test (@constinferred coth(t)) ≈ cosh(t) / sinh(t) - t1 = sin(t) - @test sin(@constinferred asin(t1)) ≈ t1 - t2 = cos(t) - @test cos(@constinferred acos(t2)) ≈ t2 - t3 = sinh(t) - @test sinh(@constinferred asinh(t3)) ≈ t3 - t4 = cosh(t) - @test cosh(@constinferred acosh(t4)) ≈ t4 - t5 = tan(t) - @test tan(@constinferred atan(t5)) ≈ t5 - t6 = cot(t) - @test cot(@constinferred acot(t6)) ≈ t6 - t7 = tanh(t) - @test tanh(@constinferred atanh(t7)) ≈ t7 - t8 = coth(t) - @test coth(@constinferred acoth(t8)) ≈ t8 - end - end - end - @timedtestset "Sylvester equation" begin - for T in (Float32, ComplexF64) - tA = rand(T, V1 ⊗ V3, V1 ⊗ V3) - tB = rand(T, V2 ⊗ V4, V2 ⊗ V4) - tA = 3 // 2 * leftorth(tA; alg=Polar())[1] - tB = 1 // 5 * leftorth(tB; alg=Polar())[1] - tC = rand(T, V1 ⊗ V3, V2 ⊗ V4) - t = @constinferred sylvester(tA, tB, tC) - @test codomain(t) == V1 ⊗ V3 - @test domain(t) == V2 ⊗ V4 - @test norm(tA 
* t + t * tB + tC) < - (norm(tA) + norm(tB) + norm(tC)) * eps(real(T))^(2 / 3) - if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) - matrix(x) = reshape(convert(Array, x), dim(codomain(x)), dim(domain(x))) - @test matrix(t) ≈ sylvester(matrix(tA), matrix(tB), matrix(tC)) - end - end - end - @timedtestset "Tensor product: test via norm preservation" begin - for T in (Float32, ComplexF64) - t1 = rand(T, V2 ⊗ V3 ⊗ V1, V1 ⊗ V2) - t2 = rand(T, V2 ⊗ V1 ⊗ V3, V1 ⊗ V1) - t = @constinferred (t1 ⊗ t2) - @test norm(t) ≈ norm(t1) * norm(t2) - end - end - if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) - @timedtestset "Tensor product: test via conversion" begin - for T in (Float32, ComplexF64) - t1 = rand(T, V2 ⊗ V3 ⊗ V1, V1) - t2 = rand(T, V2 ⊗ V1 ⊗ V3, V2) - t = @constinferred (t1 ⊗ t2) - d1 = dim(codomain(t1)) - d2 = dim(codomain(t2)) - d3 = dim(domain(t1)) - d4 = dim(domain(t2)) - At = convert(Array, t) - @test reshape(At, (d1, d2, d3, d4)) ≈ - reshape(convert(Array, t1), (d1, 1, d3, 1)) .* - reshape(convert(Array, t2), (1, d2, 1, d4)) - end - end - end - @timedtestset "Tensor product: test via tensor contraction" begin - for T in (Float32, ComplexF64) - t1 = rand(T, V2 ⊗ V3 ⊗ V1) - t2 = rand(T, V2 ⊗ V1 ⊗ V3) - t = @constinferred (t1 ⊗ t2) - @tensor t′[1, 2, 3, 4, 5, 6] := t1[1, 2, 3] * t2[4, 5, 6] - @test t ≈ t′ - end - end + # t1 = sin(t) + # @test sin(@constinferred asin(t1)) ≈ t1 + # t2 = cos(t) + # @test cos(@constinferred acos(t2)) ≈ t2 + # t3 = sinh(t) + # @test sinh(@constinferred asinh(t3)) ≈ t3 + # t4 = cosh(t) + # @test cosh(@constinferred acosh(t4)) ≈ t4 + # t5 = tan(t) + # @test tan(@constinferred atan(t5)) ≈ t5 + # t6 = cot(t) + # @test cot(@constinferred acot(t6)) ≈ t6 + # t7 = tanh(t) + # @test tanh(@constinferred atanh(t7)) ≈ t7 + # t8 = coth(t) + # @test coth(@constinferred acoth(t8)) ≈ t8 + # end + # end + # end + # @timedtestset "Sylvester equation" begin + # for T in (Float32, ComplexF64) + # tA = rand(T, V1 ⊗ V3, V1 ⊗ V3) + # tB = rand(T, V2 ⊗ V4, V2 ⊗ V4) + # tA = 3 // 2 * leftorth(tA; alg=Polar())[1] + # tB = 1 // 5 * leftorth(tB; alg=Polar())[1] + # tC = rand(T, V1 ⊗ V3, V2 ⊗ V4) + # t = @constinferred sylvester(tA, tB, tC) + # @test codomain(t) == V1 ⊗ V3 + # @test domain(t) == V2 ⊗ V4 + # @test norm(tA * t + t * tB + tC) < + # (norm(tA) + norm(tB) + norm(tC)) * eps(real(T))^(2 / 3) + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # matrix(x) = reshape(convert(Array, x), dim(codomain(x)), dim(domain(x))) + # @test matrix(t) ≈ sylvester(matrix(tA), matrix(tB), matrix(tC)) + # end + # end + # end + # @timedtestset "Tensor product: test via norm preservation" begin + # for T in (Float32, ComplexF64) + # t1 = rand(T, V2 ⊗ V3 ⊗ V1, V1 ⊗ V2) + # t2 = rand(T, V2 ⊗ V1 ⊗ V3, V1 ⊗ V1) + # t = @constinferred (t1 ⊗ t2) + # @test norm(t) ≈ norm(t1) * norm(t2) + # end + # end + # if BraidingStyle(I) isa Bosonic && hasfusiontensor(I) + # @timedtestset "Tensor product: test via conversion" begin + # for T in (Float32, ComplexF64) + # t1 = rand(T, V2 ⊗ V3 ⊗ V1, V1) + # t2 = rand(T, V2 ⊗ V1 ⊗ V3, V2) + # t = @constinferred (t1 ⊗ t2) + # d1 = dim(codomain(t1)) + # d2 = dim(codomain(t2)) + # d3 = dim(domain(t1)) + # d4 = dim(domain(t2)) + # At = convert(Array, t) + # @test reshape(At, (d1, d2, d3, d4)) ≈ + # reshape(convert(Array, t1), (d1, 1, d3, 1)) .* + # reshape(convert(Array, t2), (1, d2, 1, d4)) + # end + # end + # end + # @timedtestset "Tensor product: test via tensor contraction" begin + # for T in (Float32, ComplexF64) + # t1 = rand(T, V2 ⊗ V3 ⊗ V1) + # t2 = 
rand(T, V2 ⊗ V1 ⊗ V3) + # t = @constinferred (t1 ⊗ t2) + # @tensor t′[1, 2, 3, 4, 5, 6] := t1[1, 2, 3] * t2[4, 5, 6] + # @test t ≈ t′ + # end + # end end end -@timedtestset "Deligne tensor product: test via conversion" begin - @testset for Vlist1 in (Vtr, VSU₂), Vlist2 in (Vtr, Vℤ₂) - V1, V2, V3, V4, V5 = Vlist1 - W1, W2, W3, W4, W5 = Vlist2 - for T in (Float32, ComplexF64) - t1 = rand(T, V1 ⊗ V2, V3' ⊗ V4) - t2 = rand(T, W2, W1 ⊗ W1') - t = @constinferred (t1 ⊠ t2) - d1 = dim(codomain(t1)) - d2 = dim(codomain(t2)) - d3 = dim(domain(t1)) - d4 = dim(domain(t2)) - At = convert(Array, t) - @test reshape(At, (d1, d2, d3, d4)) ≈ - reshape(convert(Array, t1), (d1, 1, d3, 1)) .* - reshape(convert(Array, t2), (1, d2, 1, d4)) - end - end -end +# @timedtestset "Deligne tensor product: test via conversion" begin +# @testset for Vlist1 in (Vtr, VSU₂), Vlist2 in (Vtr, Vℤ₂) +# V1, V2, V3, V4, V5 = Vlist1 +# W1, W2, W3, W4, W5 = Vlist2 +# for T in (Float32, ComplexF64) +# t1 = rand(T, V1 ⊗ V2, V3' ⊗ V4) +# t2 = rand(T, W2, W1 ⊗ W1') +# t = @constinferred (t1 ⊠ t2) +# d1 = dim(codomain(t1)) +# d2 = dim(codomain(t2)) +# d3 = dim(domain(t1)) +# d4 = dim(domain(t2)) +# At = convert(Array, t) +# @test reshape(At, (d1, d2, d3, d4)) ≈ +# reshape(convert(Array, t1), (d1, 1, d3, 1)) .* +# reshape(convert(Array, t2), (1, d2, 1, d4)) +# end +# end +# end
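
The data-access hunks above replace the per-sector dictionary with a single flat `t.data` vector plus precomputed structure information. The sketch below is illustrative and not part of the patch: it assumes the user-facing behaviour of `block`, fusion-tree indexing and sector indexing described in the docstrings above, and uses a hypothetical ℤ₂-graded example space.

using TensorKit

V = Z2Space(0 => 2, 1 => 2)          # hypothetical example space, not from the patch
t = randn(ComplexF64, V ⊗ V ← V)

# Block-level access: `block(t, c)` is now a reshaped view into the flat `t.data`.
for c in blocksectors(t)
    @show c, size(block(t, c))
end

# Subblock access through a splitting/fusion tree pair; writes go through to `t.data`.
f₁ = FusionTree((Z2Irrep(0), Z2Irrep(1)), Z2Irrep(1), (false, false))
f₂ = FusionTree((Z2Irrep(1),), Z2Irrep(1), (false,))
t[f₁, f₂] .= 0

# Sector-based indexing with combined uncoupled charges, available because
# Z2Irrep has UniqueFusion; returns a view of size (2, 2, 2) here.
@show size(t[(Z2Irrep(0), Z2Irrep(1), Z2Irrep(1))])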
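
The `convert` and `promote_rule` hunks now work directly with the five-parameter `TensorMap{T,S,N₁,N₂,A}` type and its flat storage. A small, hedged illustration, again not part of the patch and reusing the same hypothetical example space:

using TensorKit

V = Z2Space(0 => 2, 1 => 2)
tr = randn(Float64, V ⊗ V ← V)
tc = randn(ComplexF64, V ⊗ V ← V)

# Fully parameterized type: scalar, space type, numbers of legs, storage vector.
TTc = TensorMap{ComplexF64,spacetype(tr),2,1,Vector{ComplexF64}}
t2 = convert(TTc, tr)                # converts the flat data vector, keeps the space
@show typeof(t2) === TTc
@show Base.promote_typeof(tr, tc) === TTc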
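
Similarly, the VectorInterface methods now act on the flat data vectors instead of looping over blocks. An illustrative check of that equivalence, assuming the `zerovector`/`add!`/`scale!` API of the VectorInterface package that TensorKit builds on; direct `.data` field access is shown only to make the flat layout visible.

using TensorKit
using VectorInterface: zerovector, add!, scale!

V = Z2Space(0 => 2, 1 => 2)
x = randn(Float64, V ⊗ V ← V)
y = zerovector(x)                    # same space, zeroed flat data vector
add!(y, x, 2.0, 1.0)                 # y ← 2x + y, a single add! on y.data and x.data
@show y.data ≈ 2 .* x.data
scale!(y, 0.5)                       # in-place scaling of y.data
@show y ≈ x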