Skip to content

[Nonlinear] fix performance of evaluating univariate operators #2620

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 3 commits into from
Feb 10, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
2 changes: 1 addition & 1 deletion src/Nonlinear/ReverseAD/forward_over_reverse.jl
Original file line number Diff line number Diff line change
Expand Up @@ -376,7 +376,7 @@ function _forward_eval_ϵ(
@inbounds child_idx = children_arr[ex.adj.colptr[k]]
f′′ = Nonlinear.eval_univariate_hessian(
user_operators,
user_operators.univariate_operators[node.index],
node.index,
ex.forward_storage[child_idx],
)
partials_storage_ϵ[child_idx] = f′′ * storage_ϵ[child_idx]
Expand Down
11 changes: 4 additions & 7 deletions src/Nonlinear/ReverseAD/reverse_mode.jl
Original file line number Diff line number Diff line change
Expand Up @@ -248,16 +248,13 @@ function _forward_eval(
end
elseif node.type == Nonlinear.NODE_CALL_UNIVARIATE
child_idx = children_arr[f.adj.colptr[k]]
f.forward_storage[k] = Nonlinear.eval_univariate_function(
ret_f, ret_f′ = Nonlinear.eval_univariate_function_and_gradient(
operators,
operators.univariate_operators[node.index],
f.forward_storage[child_idx],
)
f.partials_storage[child_idx] = Nonlinear.eval_univariate_gradient(
operators,
operators.univariate_operators[node.index],
node.index,
f.forward_storage[child_idx],
)
f.forward_storage[k] = ret_f
f.partials_storage[child_idx] = ret_f′
elseif node.type == Nonlinear.NODE_COMPARISON
children_idx = SparseArrays.nzrange(f.adj, k)
result = true
Expand Down
2 changes: 1 addition & 1 deletion src/Nonlinear/model.jl
Original file line number Diff line number Diff line change
Expand Up @@ -377,7 +377,7 @@ function evaluate(
child_idx = children_arr[adj.colptr[k]]
storage[k] = eval_univariate_function(
model.operators,
model.operators.univariate_operators[node.index],
node.index,
storage[child_idx],
)
elseif node.type == NODE_COMPARISON
Expand Down
184 changes: 172 additions & 12 deletions src/Nonlinear/operators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -63,6 +63,33 @@
end
end

# Evaluate the registered callback `operator.f` at `x`, asserting via
# `check_return_type` that the user's function returned a value of type `T`.
function eval_univariate_function(operator::_UnivariateOperator, x::T) where {T}
    value = operator.f(x)
    check_return_type(T, value)
    return value::T
end

# Evaluate the registered first-derivative callback `operator.f′` at `x`,
# asserting that the user's function returned a value of type `T`.
function eval_univariate_gradient(operator::_UnivariateOperator, x::T) where {T}
    value = operator.f′(x)
    check_return_type(T, value)
    return value::T
end

# Evaluate the registered second-derivative callback `operator.f′′` at `x`,
# asserting that the user's function returned a value of type `T`.
function eval_univariate_hessian(operator::_UnivariateOperator, x::T) where {T}
    value = operator.f′′(x)
    check_return_type(T, value)
    return value::T
end

# Evaluate both the function and the first derivative of `operator` at `x`,
# returning them as a 2-tuple. Each component is type-checked by the
# corresponding single-value evaluator.
function eval_univariate_function_and_gradient(
    operator::_UnivariateOperator,
    x::T,
) where {T}
    f = eval_univariate_function(operator, x)
    f′ = eval_univariate_gradient(operator, x)
    return (f, f′)
end

struct _MultivariateOperator{F,F′,F′′}
N::Int
f::F
Expand Down Expand Up @@ -517,81 +544,214 @@
"""
eval_univariate_function(
registry::OperatorRegistry,
op::Union{Symbol,Integer},
x::T,
) where {T}

Evaluate the operator `op(x)::T`, where `op` is a univariate function in
`registry`.

If `op isa Integer`, then `op` is the index in
`registry.univariate_operators[op]`.

## Example

```jldoctest
julia> import MathOptInterface as MOI

julia> r = MOI.Nonlinear.OperatorRegistry();

julia> MOI.Nonlinear.eval_univariate_function(r, :abs, -1.2)
1.2

julia> r.univariate_operators[3]
:abs

julia> MOI.Nonlinear.eval_univariate_function(r, 3, -1.2)
1.2
```
"""
# Symbol entry point: resolve `op` to its integer id in the registry and
# dispatch to the integer-indexed method, which does the actual evaluation.
function eval_univariate_function(
    registry::OperatorRegistry,
    op::Symbol,
    x::T,
) where {T}
    index = registry.univariate_operator_to_id[op]
    return eval_univariate_function(registry, index, x)
end

# Integer entry point: evaluate the univariate operator with index `id` in
# `registry.univariate_operators` at `x`.
function eval_univariate_function(
    registry::OperatorRegistry,
    id::Integer,
    x::T,
) where {T}
    if id <= registry.univariate_user_operator_start
        # Built-in operator: evaluated via the hard-coded dispatch in
        # `_eval_univariate`, which returns (f, f′); we need only f here.
        f, _ = _eval_univariate(id, x)
        return f::T
    end
    # User-defined operator: indices past `univariate_user_operator_start`
    # map into the vector of registered operators.
    offset = id - registry.univariate_user_operator_start
    operator = registry.registered_univariate_operators[offset]
    return eval_univariate_function(operator, x)
end

"""
eval_univariate_gradient(
registry::OperatorRegistry,
op::Union{Symbol,Integer},
x::T,
) where {T}

Evaluate the first-derivative of the operator `op(x)::T`, where `op` is a
univariate function in `registry`.

If `op isa Integer`, then `op` is the index in
`registry.univariate_operators[op]`.

## Example

```jldoctest
julia> import MathOptInterface as MOI

julia> r = MOI.Nonlinear.OperatorRegistry();

julia> MOI.Nonlinear.eval_univariate_gradient(r, :abs, -1.2)
-1.0

julia> r.univariate_operators[3]
:abs

julia> MOI.Nonlinear.eval_univariate_gradient(r, 3, -1.2)
-1.0
```
"""
# Symbol entry point: resolve `op` to its integer id in the registry and
# dispatch to the integer-indexed method, which does the actual evaluation.
function eval_univariate_gradient(
    registry::OperatorRegistry,
    op::Symbol,
    x::T,
) where {T}
    index = registry.univariate_operator_to_id[op]
    return eval_univariate_gradient(registry, index, x)
end

# Integer entry point: evaluate the first derivative of the univariate
# operator with index `id` in `registry.univariate_operators` at `x`.
function eval_univariate_gradient(
    registry::OperatorRegistry,
    id::Integer,
    x::T,
) where {T}
    if id <= registry.univariate_user_operator_start
        # Built-in operator: `_eval_univariate` returns (f, f′); we need
        # only the derivative here.
        _, f′ = _eval_univariate(id, x)
        return f′::T
    end
    # User-defined operator: indices past `univariate_user_operator_start`
    # map into the vector of registered operators.
    offset = id - registry.univariate_user_operator_start
    operator = registry.registered_univariate_operators[offset]
    return eval_univariate_gradient(operator, x)
end

"""
eval_univariate_function_and_gradient(
registry::OperatorRegistry,
op::Union{Symbol,Integer},
x::T,
)::Tuple{T,T} where {T}

Evaluate the function and first-derivative of the operator `op(x)::T`, where
`op` is a univariate function in `registry`.

If `op isa Integer`, then `op` is the index in
`registry.univariate_operators[op]`.

## Example

```jldoctest
julia> import MathOptInterface as MOI

julia> r = MOI.Nonlinear.OperatorRegistry();

julia> MOI.Nonlinear.eval_univariate_function_and_gradient(r, :abs, -1.2)
(1.2, -1.0)

julia> r.univariate_operators[3]
:abs

julia> MOI.Nonlinear.eval_univariate_function_and_gradient(r, 3, -1.2)
(1.2, -1.0)
```
"""
# Symbol entry point: resolve `op` to its integer id in the registry and
# dispatch to the integer-indexed method, which does the actual evaluation.
function eval_univariate_function_and_gradient(
    registry::OperatorRegistry,
    op::Symbol,
    x::T,
) where {T}
    id = registry.univariate_operator_to_id[op]
    return eval_univariate_function_and_gradient(registry, id, x)
end

# Integer entry point: evaluate both the function value and the first
# derivative of the univariate operator with index `id` at `x`, returning
# them as a `Tuple{T,T}`.
function eval_univariate_function_and_gradient(
    registry::OperatorRegistry,
    id::Integer,
    x::T,
) where {T}
    if id > registry.univariate_user_operator_start
        # User-defined operator: indices past `univariate_user_operator_start`
        # map into the vector of registered operators.
        offset = id - registry.univariate_user_operator_start
        operator = registry.registered_univariate_operators[offset]
        return eval_univariate_function_and_gradient(operator, x)
    end
    # Built-in operator: the hard-coded dispatch returns (f, f′) directly.
    return _eval_univariate(id, x)::Tuple{T,T}
end

"""
eval_univariate_hessian(
registry::OperatorRegistry,
op::Union{Symbol,Integer},
x::T,
) where {T}

Evaluate the second-derivative of the operator `op(x)::T`, where `op` is a
univariate function in `registry`.

If `op isa Integer`, then `op` is the index in
`registry.univariate_operators[op]`.

## Example

```jldoctest
julia> import MathOptInterface as MOI

julia> r = MOI.Nonlinear.OperatorRegistry();

julia> MOI.Nonlinear.eval_univariate_hessian(r, :sin, 1.0)
-0.8414709848078965

julia> r.univariate_operators[16]
:sin

julia> MOI.Nonlinear.eval_univariate_hessian(r, 16, 1.0)
-0.8414709848078965

julia> -sin(1.0)
-0.8414709848078965
```
"""
# Symbol entry point: resolve `op` to its integer id in the registry and
# dispatch to the integer-indexed method, which does the actual evaluation.
function eval_univariate_hessian(
    registry::OperatorRegistry,
    op::Symbol,
    x::T,
) where {T}
    index = registry.univariate_operator_to_id[op]
    return eval_univariate_hessian(registry, index, x)
end

# Integer entry point: evaluate the second derivative of the univariate
# operator with index `id` in `registry.univariate_operators` at `x`.
function eval_univariate_hessian(
    registry::OperatorRegistry,
    id::Integer,
    x::T,
) where {T}
    if id <= registry.univariate_user_operator_start
        # Built-in operator: second derivative via the hard-coded dispatch.
        return _eval_univariate_2nd_deriv(id, x)::T
    end
    # User-defined operator: indices past `univariate_user_operator_start`
    # map into the vector of registered operators.
    offset = id - registry.univariate_user_operator_start
    operator = registry.registered_univariate_operators[offset]
    return eval_univariate_hessian(operator, x)
end

"""
Expand Down
61 changes: 49 additions & 12 deletions test/Nonlinear/Nonlinear.jl
Original file line number Diff line number Diff line change
Expand Up @@ -360,28 +360,65 @@ end

# Check that `eval_univariate_function` agrees for the Symbol form and the
# equivalent integer-id form across a sample of built-in operators.
function test_eval_univariate_function()
    r = Nonlinear.OperatorRegistry()
    for (op, x, y) in [
        (:+, 1.0, 1.0),
        (:-, 1.0, -1.0),
        (:abs, -1.1, 1.1),
        (:abs, 1.1, 1.1),
        (:sin, 1.1, sin(1.1)),
    ]
        id = r.univariate_operator_to_id[op]
        @test Nonlinear.eval_univariate_function(r, op, x) == y
        @test Nonlinear.eval_univariate_function(r, id, x) == y
    end
    return
end

# Check that `eval_univariate_gradient` agrees for the Symbol form and the
# equivalent integer-id form across a sample of built-in operators.
function test_eval_univariate_gradient()
    r = Nonlinear.OperatorRegistry()
    for (op, x, y) in [
        (:+, 1.2, 1.0),
        (:-, 1.2, -1.0),
        (:abs, -1.1, -1.0),
        (:abs, 1.1, 1.0),
        (:sin, 1.1, cos(1.1)),
    ]
        id = r.univariate_operator_to_id[op]
        @test Nonlinear.eval_univariate_gradient(r, op, x) == y
        @test Nonlinear.eval_univariate_gradient(r, id, x) == y
    end
    return
end

# Check that `eval_univariate_function_and_gradient` returns the expected
# (f, f′) tuple for both the Symbol form and the equivalent integer-id form.
function test_eval_univariate_function_and_gradient()
    registry = Nonlinear.OperatorRegistry()
    cases = [
        (:+, 1.2, (1.2, 1.0)),
        (:-, 1.2, (-1.2, -1.0)),
        (:abs, -1.1, (1.1, -1.0)),
        (:abs, 1.1, (1.1, 1.0)),
        (:sin, 1.1, (sin(1.1), cos(1.1))),
    ]
    for (op, x, expected) in cases
        op_id = registry.univariate_operator_to_id[op]
        ret_sym = Nonlinear.eval_univariate_function_and_gradient(registry, op, x)
        ret_int = Nonlinear.eval_univariate_function_and_gradient(registry, op_id, x)
        @test ret_sym == expected
        @test ret_int == expected
    end
    return
end

# Check that `eval_univariate_hessian` agrees for the Symbol form and the
# equivalent integer-id form across a sample of built-in operators.
function test_eval_univariate_hessian()
    r = Nonlinear.OperatorRegistry()
    for (op, x, y) in [
        (:+, 1.2, 0.0),
        (:-, 1.2, 0.0),
        (:abs, -1.1, 0.0),
        (:abs, 1.1, 0.0),
        (:sin, 1.0, -sin(1.0)),
    ]
        id = r.univariate_operator_to_id[op]
        @test Nonlinear.eval_univariate_hessian(r, op, x) == y
        @test Nonlinear.eval_univariate_hessian(r, id, x) == y
    end
    return
end

Expand Down
Loading