Skip to content

test utils revamp #159

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 41 commits into from
Sep 25, 2020
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
41 commits
Select commit Hold shift + click to select a range
e322d2c
Fix style
Aug 30, 2020
81b2bb8
Fix convention
Aug 30, 2020
c795cdb
First pass over test set implementation
Aug 30, 2020
24b0422
Add standardised tests to BaseKernels
Aug 30, 2020
32877de
Test composite kernels
Aug 30, 2020
a32eae6
Fix some tests
Aug 30, 2020
5815b41
Fix maha
Aug 30, 2020
14178ce
Fix sm
Aug 30, 2020
7ab9c52
Fix up maha
Aug 30, 2020
180c934
Remove redundant file
Aug 30, 2020
4954575
Move existing test utils over to module
Aug 30, 2020
a79de46
Add Gamma Exponential kernel reference
Aug 31, 2020
4ba9c35
Update src/matrix/kernelpdmat.jl
willtebbutt Aug 31, 2020
ce11e1d
Remove repeated code
Sep 2, 2020
1086241
Warn about breaking change
Sep 7, 2020
1c8216d
Merge in master
Sep 7, 2020
e73b23d
Resolve merge conflict
Sep 7, 2020
22295f6
Update src/test_utils.jl
willtebbutt Sep 7, 2020
a945ab6
Merge in master
Sep 8, 2020
463d1ea
Merge in master
Sep 8, 2020
523313d
Resolve merge conflict
Sep 21, 2020
b58c649
Bump patch
Sep 21, 2020
2e508e9
Fix up tests
Sep 21, 2020
e821f1a
Remove dead space
Sep 22, 2020
09efe1a
Fix rational quadratic parameter test
Sep 22, 2020
7eaae64
Fix some style issues
Sep 22, 2020
13772f9
Add extra parameter check
Sep 22, 2020
7a7fdf1
Update src/basekernels/rationalquad.jl
willtebbutt Sep 22, 2020
c8965ac
Tweak check
Sep 22, 2020
cc559eb
Fix RQ convention to match EQ
Sep 22, 2020
8c079a5
Refactor tests
Sep 22, 2020
dbd0c16
Fix nn issues
Sep 22, 2020
d250375
Merge branch 'master' into wct/test-utils
Sep 23, 2020
efb18be
Fix weird printing issue
Sep 23, 2020
93ec40d
Update src/test_utils.jl
willtebbutt Sep 24, 2020
c7f2490
Update test/kernels/kernelsum.jl
willtebbutt Sep 24, 2020
bc345f6
Update src/test_utils.jl
willtebbutt Sep 24, 2020
ab492c0
Test FBM kernel
Sep 24, 2020
e2bb5b5
Fix up Gabor
Sep 24, 2020
ef044ea
Loosen dof bound
Sep 25, 2020
e63002e
Perturb test
Sep 25, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,10 +8,12 @@ Distances = "b4f34e82-e78d-54a5-968a-f98e89d6e8f7"
Functors = "d9f16b24-f501-4c13-a1f2-28368ffc5196"
InteractiveUtils = "b77e0a4c-d291-57a0-90e8-8db25a27a240"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
Random = "9a3f8284-a2c9-5f02-9a11-845980a1fd5c"
Requires = "ae029012-a4dd-5104-9daa-d747884805df"
SpecialFunctions = "276daf66-3868-5448-9aa4-cd146d93841b"
StatsBase = "2913bbd2-ae8a-5f71-8c99-4fb6c76f3a91"
StatsFuns = "4c63d2b9-4356-54db-8cca-17b64c39e42c"
Test = "8dfed614-e22c-5e08-85e1-65c5234f0b40"
ZygoteRules = "700de1a5-db45-46bc-99cf-38207098b444"

[compat]
Expand Down
4 changes: 4 additions & 0 deletions src/KernelFunctions.jl
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,8 @@ export NystromFact, nystrom

export spectral_mixture_kernel, spectral_mixture_product_kernel

export ColVecs, RowVecs

export MOInput
export IndependentMOKernel, LatentFactorMOKernel

Expand Down Expand Up @@ -108,6 +110,8 @@ include(joinpath("mokernels", "slfm.jl"))

include("zygote_adjoints.jl")

include("test_utils.jl")

function __init__()
@require Kronecker="2c470bb0-bcc8-11e8-3dad-c9649493f05e" begin
include(joinpath("matrix", "kernelkroneckermat.jl"))
Expand Down
15 changes: 4 additions & 11 deletions src/basekernels/gabor.jl
Original file line number Diff line number Diff line change
Expand Up @@ -57,17 +57,10 @@ end

Base.show(io::IO, κ::GaborKernel) = print(io, "Gabor Kernel (ell = ", κ.ell, ", p = ", κ.p, ")")

# GaborKernel wraps a composite kernel in its `kernel` field; all matrix
# computations simply delegate to that wrapped kernel.
kernelmatrix(κ::GaborKernel, x::AbstractVector) = kernelmatrix(κ.kernel, x)

# Cross-kernel matrix between two input collections, delegated to the wrapped kernel.
function kernelmatrix(κ::GaborKernel, x::AbstractVector, y::AbstractVector)
    return kernelmatrix(κ.kernel, x, y)
end

# Diagonal of `kernelmatrix(κ, x)` without forming the full matrix.
kerneldiagmatrix(κ::GaborKernel, x::AbstractVector) = kerneldiagmatrix(κ.kernel, x)
4 changes: 2 additions & 2 deletions src/basekernels/nn.jl
Original file line number Diff line number Diff line change
Expand Up @@ -42,13 +42,13 @@ function kernelmatrix(::NeuralNetworkKernel, x::RowVecs, y::RowVecs)
X_2 = sum(x.X .* x.X; dims=2)
Y_2 = sum(y.X .* y.X; dims=2)
XY = x.X * y.X'
return asin.(XY ./ sqrt.((X_2 .+ 1)' * (Y_2 .+ 1)))
return asin.(XY ./ sqrt.((X_2 .+ 1) * (Y_2 .+ 1)'))
end

# Unary neural-network-kernel matrix for row-major inputs.
# With observations as rows of x.X:
#   X_2_1[i] = ||xᵢ||² + 1            (n×1 column vector)
#   XX[i, j] = ⟨xᵢ, xⱼ⟩               (n×n Gram matrix)
# so X_2_1 * X_2_1' gives the (i, j) normaliser (||xᵢ||² + 1)(||xⱼ||² + 1).
function kernelmatrix(::NeuralNetworkKernel, x::RowVecs)
    X_2_1 = sum(x.X .* x.X; dims=2) .+ 1
    XX = x.X * x.X'
    return asin.(XX ./ sqrt.(X_2_1 * X_2_1'))
end

Base.show(io::IO, κ::NeuralNetworkKernel) = print(io, "Neural Network Kernel")
4 changes: 3 additions & 1 deletion src/basekernels/periodic.jl
Original file line number Diff line number Diff line change
Expand Up @@ -26,4 +26,6 @@ metric(κ::PeriodicKernel) = Sinus(κ.r)

# `d` is the distance produced by the kernel's `Sinus(κ.r)` metric (see
# `metric` above); the kernel value is exp(-d/2).
kappa(κ::PeriodicKernel, d::Real) = exp(- 0.5d)

# Display a concise, human-readable summary of the kernel.
function Base.show(io::IO, κ::PeriodicKernel)
    return print(io, "Periodic Kernel, length(r) = $(length(κ.r))")
end
40 changes: 27 additions & 13 deletions src/basekernels/rationalquad.jl
Original file line number Diff line number Diff line change
@@ -1,48 +1,62 @@
"""
RationalQuadraticKernel(; α = 2.0)
RationalQuadraticKernel(; α=2.0)
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Is it common to use α = 2? Something like α = 1 seems simpler, and is actually used as the default value by e.g. scikit-learn.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Hmmm good question. I really hadn't thought too much about this and wasn't overly fussed. I worry that 1 will yield a kernel with very long-range correlations (I'm just thinking about what a students-t with dof 1 looks like -- I think they coincide in this case).

I can't say that I'm overly fussed overall, so happy to change to 1 from 2 if you would prefer.

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I don't have a strong opinion on this, so do whatever you think is more reasonable. Just noticed this when I compared it with the parameter choices in scikit-learn, and was wondering why we use 2 here.

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I think I'm going to leave as-is for the sake of this PR. More than happy for this to be changed in a subsequent PR though.


The rational-quadratic kernel is a Mercer kernel given by the formula:
```
κ(x,y)=(1+||x−y||²)^(-α)
κ(x, y) = (1 + ||x−y||² / (2α))^(-α)
```
where `α` is a shape parameter of the Euclidean distance. Check [`GammaRationalQuadraticKernel`](@ref) for a generalization.
where `α` is a shape parameter of the Euclidean distance. Check
[`GammaRationalQuadraticKernel`](@ref) for a generalization.
"""
# Rational quadratic kernel with shape parameter α. The parameter is stored
# in a 1-element vector so it is trainable in-place via Functors.jl.
struct RationalQuadraticKernel{Tα<:Real} <: SimpleKernel
    α::Vector{Tα}
    function RationalQuadraticKernel(;alpha::T=2.0, α::T=alpha) where {T}
        # α must be strictly positive for the kernel to be well-defined.
        @check_args(RationalQuadraticKernel, α, α > zero(T), "α > 0")
        return new{T}([α])
    end
end

@functor RationalQuadraticKernel

# κ(d²) = (1 + d² / (2α))^(-α), where d² is the squared Euclidean distance
# supplied by `metric(::RationalQuadraticKernel) = SqEuclidean()`.
function kappa(κ::RationalQuadraticKernel, d²::T) where {T<:Real}
    return (one(T) + d² / (2 * first(κ.α)))^(-first(κ.α))
end

# The RQ kernel is defined in terms of the squared Euclidean distance.
metric(::RationalQuadraticKernel) = SqEuclidean()

# Display a concise, human-readable summary of the kernel and its α parameter.
function Base.show(io::IO, κ::RationalQuadraticKernel)
    print(io, "Rational Quadratic Kernel (α = $(first(κ.α)))")
end

"""
`GammaRationalQuadraticKernel([ρ=1.0[,α=2.0[,γ=2.0]]])`
`GammaRationalQuadraticKernel([α=2.0 [, γ=2.0]])`

The Gamma-rational-quadratic kernel is an isotropic Mercer kernel given by the formula:
```
κ(x,y)=(1+ρ^(2γ)||x−y||^(2γ)/α)^(-α)
κ(x, y) = (1 + ||x−y||^γ / α)^(-α)
```
where `α` is a shape parameter of the Euclidean distance and `γ` is another shape parameter.
"""
# Gamma-rational-quadratic kernel with shape parameters α and γ. Both are
# stored as 1-element vectors so they are trainable in-place via Functors.jl.
struct GammaRationalQuadraticKernel{Tα<:Real, Tγ<:Real} <: SimpleKernel
    α::Vector{Tα}
    γ::Vector{Tγ}
    function GammaRationalQuadraticKernel(
        ;alpha::Tα=2.0, gamma::Tγ=2.0, α::Tα=alpha, γ::Tγ=gamma,
    ) where {Tα<:Real, Tγ<:Real}
        # α must be strictly positive; γ must lie in (0, 2] for validity.
        @check_args(GammaRationalQuadraticKernel, α, α > zero(Tα), "α > 0")
        @check_args(GammaRationalQuadraticKernel, γ, zero(γ) < γ <= 2, "0 < γ <= 2")
        return new{Tα, Tγ}([α], [γ])
    end
end

@functor GammaRationalQuadraticKernel

# `d²` is the squared Euclidean distance, so d²^(γ/2) == ||x − y||^γ.
function kappa(κ::GammaRationalQuadraticKernel, d²::Real)
    return (one(d²) + d²^(first(κ.γ) / 2) / first(κ.α))^(-first(κ.α))
end

# kappa receives the squared Euclidean distance and applies the γ/2 power itself.
metric(::GammaRationalQuadraticKernel) = SqEuclidean()

# Display a concise, human-readable summary of the kernel and its parameters.
function Base.show(io::IO, κ::GammaRationalQuadraticKernel)
    print(io, "Gamma Rational Quadratic Kernel (α = $(first(κ.α)), γ = $(first(κ.γ)))")
end
6 changes: 3 additions & 3 deletions src/basekernels/sm.jl
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,7 @@ function spectral_mixture_kernel(
γs::AbstractMatrix{<:Real},
ωs::AbstractMatrix{<:Real}
)
spectral_mixture_kernel(SqExponentialKernel(), αs, γs, ωs)
return spectral_mixture_kernel(SqExponentialKernel(), αs, γs, ωs)
end

"""
Expand Down Expand Up @@ -95,14 +95,14 @@ function spectral_mixture_product_kernel(
throw(DimensionMismatch("The dimensions of αs, γs, ans ωs do not match"))
end
return TensorProduct(spectral_mixture_kernel(h, α, reshape(γ, 1, :), reshape(ω, 1, :))
for (α, γ, ω) in zip(eachrow(αs), eachrow(γs), eachrow(ωs)))
for (α, γ, ω) in zip(eachrow(αs), eachrow(γs), eachrow(ωs)))
end

# Convenience method: defaults the base kernel to `SqExponentialKernel()`.
function spectral_mixture_product_kernel(
    αs::AbstractMatrix{<:Real},
    γs::AbstractMatrix{<:Real},
    ωs::AbstractMatrix{<:Real}
)
    return spectral_mixture_product_kernel(SqExponentialKernel(), αs, γs, ωs)
end

135 changes: 135 additions & 0 deletions src/test_utils.jl
Original file line number Diff line number Diff line change
@@ -0,0 +1,135 @@
module TestUtils

const __ATOL = 1e-9

using LinearAlgebra
using KernelFunctions
using Random
using Test

"""
test_interface(
k::Kernel,
x0::AbstractVector,
x1::AbstractVector,
x2::AbstractVector;
atol=__ATOL,
)

Run various consistency checks on `k` at the inputs `x0`, `x1`, and `x2`.
`x0` and `x1` should be of the same length with different values, while `x0` and `x2` should
be of different lengths.

test_interface([rng::AbstractRNG], k::Kernel, T::Type{<:AbstractVector}; atol=__ATOL)

`test_interface` offers certain types of test data generation to make running these tests
require less code for common input types. For example, `Vector{<:Real}`, `ColVecs{<:Real}`,
and `RowVecs{<:Real}` are supported. For other input vector types, please provide the data
manually.
"""
# Run consistency checks on kernel `k` over the supplied inputs.
# Requirements: `x0` and `x1` have equal length (with different values), while
# `x2` has a different length, so both square and rectangular code paths are hit.
function test_interface(
    k::Kernel,
    x0::AbstractVector,
    x1::AbstractVector,
    x2::AbstractVector;
    atol=__ATOL,
)
    # TODO: uncomment the tests of ternary kerneldiagmatrix.

    # Ensure that we have the required inputs.
    @assert length(x0) == length(x1)
    @assert length(x0) ≠ length(x2)

    # Check that kerneldiagmatrix basically works.
    # @test kerneldiagmatrix(k, x0, x1) isa AbstractVector
    # @test length(kerneldiagmatrix(k, x0, x1)) == length(x0)

    # Check that pairwise basically works.
    @test kernelmatrix(k, x0, x2) isa AbstractMatrix
    @test size(kernelmatrix(k, x0, x2)) == (length(x0), length(x2))

    # Check that elementwise is consistent with pairwise.
    # @test kerneldiagmatrix(k, x0, x1) ≈ diag(kernelmatrix(k, x0, x1)) atol=atol

    # Check additional binary elementwise properties for kernels (symmetry).
    # @test kerneldiagmatrix(k, x0, x1) ≈ kerneldiagmatrix(k, x1, x0)
    @test kernelmatrix(k, x0, x2) ≈ kernelmatrix(k, x2, x0)' atol=atol

    # Check that unary elementwise basically works.
    @test kerneldiagmatrix(k, x0) isa AbstractVector
    @test length(kerneldiagmatrix(k, x0)) == length(x0)

    # Check that unary pairwise basically works and is symmetric.
    @test kernelmatrix(k, x0) isa AbstractMatrix
    @test size(kernelmatrix(k, x0)) == (length(x0), length(x0))
    @test kernelmatrix(k, x0) ≈ kernelmatrix(k, x0)' atol=atol

    # Check that unary elementwise is consistent with unary pairwise.
    @test kerneldiagmatrix(k, x0) ≈ diag(kernelmatrix(k, x0)) atol=atol

    # Check that unary pairwise produces a positive definite matrix (approximately).
    @test eigmin(Matrix(kernelmatrix(k, x0))) > -atol

    # Check that unary elementwise / pairwise are consistent with the binary versions.
    # @test kerneldiagmatrix(k, x0) ≈ kerneldiagmatrix(k, x0, x0) atol=atol
    @test kernelmatrix(k, x0) ≈ kernelmatrix(k, x0, x0) atol=atol

    # Check that basic kernel evaluation succeeds and is consistent with `kernelmatrix`.
    @test k(first(x0), first(x1)) isa Real
    @test kernelmatrix(k, x0, x2) ≈ [k(xl, xr) for xl in x0, xr in x2]

    # In-place variants must agree with their allocating counterparts.
    tmp = Matrix{Float64}(undef, length(x0), length(x2))
    @test kernelmatrix!(tmp, k, x0, x2) ≈ kernelmatrix(k, x0, x2)

    tmp_square = Matrix{Float64}(undef, length(x0), length(x0))
    @test kernelmatrix!(tmp_square, k, x0) ≈ kernelmatrix(k, x0)

    tmp_diag = Vector{Float64}(undef, length(x0))
    @test kerneldiagmatrix!(tmp_diag, k, x0) ≈ kerneldiagmatrix(k, x0)
end

# Generate plain-`Vector` test data for `k`: two length-3 collections with
# distinct values and one length-2 collection, then run the interface tests.
function test_interface(
    rng::AbstractRNG, k::Kernel, ::Type{Vector{T}}; kwargs...
) where {T<:Real}
    a = randn(rng, T, 3)
    b = randn(rng, T, 3)
    c = randn(rng, T, 2)
    test_interface(k, a, b, c; kwargs...)
end

# Generate `ColVecs` test data (observations stored as matrix columns) with
# `dim_in` features each, then run the interface tests on `k`.
function test_interface(
    rng::AbstractRNG, k::Kernel, ::Type{<:ColVecs{T}}; dim_in=2, kwargs...,
) where {T<:Real}
    x0 = ColVecs(randn(rng, T, dim_in, 3))
    x1 = ColVecs(randn(rng, T, dim_in, 3))
    x2 = ColVecs(randn(rng, T, dim_in, 2))
    test_interface(k, x0, x1, x2; kwargs...)
end

# Generate `RowVecs` test data (observations stored as matrix rows) with
# `dim_in` features each, then run the interface tests on `k`.
function test_interface(
    rng::AbstractRNG, k::Kernel, ::Type{<:RowVecs{T}}; dim_in=2, kwargs...,
) where {T<:Real}
    x0 = RowVecs(randn(rng, T, 3, dim_in))
    x1 = RowVecs(randn(rng, T, 3, dim_in))
    x2 = RowVecs(randn(rng, T, 2, dim_in))
    test_interface(k, x0, x1, x2; kwargs...)
end

# Convenience method: use the global RNG when none is supplied.
function test_interface(k::Kernel, T::Type{<:AbstractVector}; kwargs...)
    test_interface(Random.GLOBAL_RNG, k, T; kwargs...)
end

# Run the interface tests over all three supported input-vector types
# (`Vector`, `ColVecs`, `RowVecs`) built from element type `T`.
function test_interface(rng::AbstractRNG, k::Kernel, T::Type{<:Real}; kwargs...)
    test_interface(rng, k, Vector{T}; kwargs...)
    test_interface(rng, k, ColVecs{T}; kwargs...)
    test_interface(rng, k, RowVecs{T}; kwargs...)
end

# Entry point with all defaults: `Float64` element type and the global RNG.
function test_interface(k::Kernel, T::Type{<:Real}=Float64; kwargs...)
    test_interface(Random.GLOBAL_RNG, k, T; kwargs...)
end

end # module
2 changes: 1 addition & 1 deletion src/transform/lineartransform.jl
Original file line number Diff line number Diff line change
Expand Up @@ -29,7 +29,7 @@ end
# Apply the linear map `A` to a single input.
(t::LinearTransform)(x::Real) = vec(t.A * x)
(t::LinearTransform)(x::AbstractVector{<:Real}) = t.A * x

# Map the transform over a collection of inputs.
# NOTE(review): `collect` materialises the lazy `Adjoint` into a dense matrix
# before the product — presumably so `ColVecs` receives a plain `Matrix`;
# confirm against downstream AD usage.
_map(t::LinearTransform, x::AbstractVector{<:Real}) = ColVecs(t.A * collect(x'))
_map(t::LinearTransform, x::ColVecs) = ColVecs(t.A * x.X)
_map(t::LinearTransform, x::RowVecs) = RowVecs(x.X * t.A')

Expand Down
2 changes: 2 additions & 0 deletions src/utils.jl
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,8 @@ struct RowVecs{T, TX<:AbstractMatrix{T}, S} <: AbstractVector{S}
end
end

# Treat a plain vector of reals as a single-feature data set: each element
# becomes one observation (one row) of a length(x) × 1 matrix.
RowVecs(x::AbstractVector) = RowVecs(reshape(x, :, 1))

# AbstractVector interface: one observation per row of the wrapped matrix.
Base.size(D::RowVecs) = (size(D.X, 1),)
# Indexing returns a non-allocating view of row i.
Base.getindex(D::RowVecs, i::Int) = view(D.X, i, :)
Base.getindex(D::RowVecs, i::CartesianIndex{1}) = view(D.X, i, :)
Expand Down
19 changes: 14 additions & 5 deletions test/basekernels/constant.jl
Original file line number Diff line number Diff line change
Expand Up @@ -2,31 +2,40 @@
@testset "ZeroKernel" begin
    k = ZeroKernel()
    @test eltype(k) == Any
    # The zero kernel evaluates to 0 for every input distance.
    @test kappa(k, 2.0) == 0.0
    @test KernelFunctions.metric(ZeroKernel()) == KernelFunctions.Delta()
    @test repr(k) == "Zero Kernel"

    # Standardised tests.
    TestUtils.test_interface(k, Float64)
    test_ADs(ZeroKernel)
end
@testset "WhiteKernel" begin
    k = WhiteKernel()
    @test eltype(k) == Any
    # The white kernel is 1 on the diagonal (distance 0 maps to 0 under Delta
    # → kappa(1.0)) and 0 elsewhere.
    @test kappa(k, 1.0) == 1.0
    @test kappa(k, 0.0) == 0.0
    # EyeKernel is an alias for WhiteKernel.
    @test EyeKernel == WhiteKernel
    @test metric(WhiteKernel()) == KernelFunctions.Delta()
    @test repr(k) == "White Kernel"

    # Standardised tests.
    TestUtils.test_interface(k, Float64)
    test_ADs(WhiteKernel)
end
@testset "ConstantKernel" begin
    c = 2.0
    k = ConstantKernel(c=c)
    @test eltype(k) == Any
    # The constant kernel returns c regardless of the input distance.
    @test kappa(k, 1.0) == c
    @test kappa(k, 0.5) == c
    @test metric(ConstantKernel()) == KernelFunctions.Delta()
    @test metric(ConstantKernel(c=2.0)) == KernelFunctions.Delta()
    @test repr(k) == "Constant Kernel (c = $(c))"
    test_params(k, ([c],))

    # Standardised tests.
    TestUtils.test_interface(k, Float64)
    test_ADs(c->ConstantKernel(c=first(c)), [c])
end
end
Loading