Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit. Hold shift + click to select a range.
77a7faa
Add L2 loss to neural network example
claude Apr 2, 2026
5dc6e5c
Add tests for binary broadcasting, norm, and L2 loss
claude Apr 2, 2026
fbeba4f
Add LinearAlgebra to package dependencies
claude Apr 2, 2026
fd26fab
Remove @objective call unsupported with array expressions
claude Apr 3, 2026
5f5ce9e
Fix method ambiguity with JuMP's norm for AbstractJuMPScalar arrays
claude Apr 3, 2026
167b377
Define norm for concrete types to avoid ambiguity
claude Apr 3, 2026
41aef87
Temporarily remove new tests to isolate failure
claude Apr 3, 2026
0fe5fdf
Add back tests with import at module level
claude Apr 3, 2026
4454406
Test only norm to isolate failure
claude Apr 3, 2026
104c536
Add back test_binary_broadcasting to isolate failure
claude Apr 3, 2026
e0e188e
Add back test_l2_loss to isolate failure
claude Apr 3, 2026
47f36d9
Simplify test_l2_loss to isolate exact failure point
claude Apr 3, 2026
5550244
Minimal test_l2_loss: broadcast GenericArrayExpr .- Matrix
claude Apr 3, 2026
793133c
Full test_l2_loss with neural net forward pass
claude Apr 3, 2026
bcadb8d
Split test_l2_loss into simple and tanh variants
claude Apr 3, 2026
e4c029a
Add test_l2_loss_nested: W2 * tanh.(W1 * X) .- Y
claude Apr 3, 2026
117c2c0
Full assertions in test_l2_loss_nested
claude Apr 3, 2026
6fc1ec7
Minimal norm test on nested broadcast result
claude Apr 3, 2026
15ac1e3
Test manual NonlinearExpr creation to isolate norm dispatch issue
claude Apr 3, 2026
1291166
Add _is_real for GenericArrayExpr and restore full tests
claude Apr 3, 2026
261aea0
Remove debug test functions
claude Apr 3, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions Project.toml
Original file line number Diff line number Diff line change
Expand Up @@ -8,6 +8,7 @@ Calculus = "49dc2e85-a5d0-5ad3-a950-438e2897f1b9"
DataStructures = "864edb3b-99cc-5e75-8d2d-829cb0a9cfe8"
ForwardDiff = "f6369f11-7733-5829-9624-2563aa707210"
JuMP = "4076af6c-e467-56ae-b986-b466b2749572"
LinearAlgebra = "37e2e46d-f89d-539d-b4ee-838fcccc9c8e"
MathOptInterface = "b8f27783-ece8-5eb3-8dc8-9495eed66fee"
NaNMath = "77ba4419-2d1f-58cd-9bb1-8ffee604a2e3"
OrderedCollections = "bac558e1-5e72-5ebc-8fee-abe8a469f55d"
Expand Down
5 changes: 4 additions & 1 deletion perf/neural.jl
Original file line number Diff line number Diff line change
@@ -1,10 +1,13 @@
# Needs https://github.com/jump-dev/JuMP.jl/pull/3451
using JuMP
using ArrayDiff
import LinearAlgebra

# Problem size and fixed (random) data for the neural-network example.
n = 2
X = rand(n, n)
Y = rand(n, n)
model = Model()
# Weight matrices held in ArrayDiff's array-of-variables container so that
# matrix products and broadcasts build array expressions instead of eagerly
# expanding to scalar JuMP expressions.
@variable(model, W1[1:n, 1:n], container = ArrayDiff.ArrayOfVariables)
@variable(model, W2[1:n, 1:n], container = ArrayDiff.ArrayOfVariables)
# One-hidden-layer forward pass with tanh activation.
# (The previous bare `W2 * tanh.(W1 * X)` statement computed the same value
# and discarded it; it has been removed as dead code.)
Y_hat = W2 * tanh.(W1 * X)
# L2 (Frobenius) loss between the prediction and the target.
loss = LinearAlgebra.norm(Y_hat .- Y)
4 changes: 3 additions & 1 deletion src/JuMP/nlp_expr.jl
Original file line number Diff line number Diff line change
Expand Up @@ -19,4 +19,6 @@ end

# The array expression carries its own shape; forward `size` to it.
function Base.size(expr::GenericArrayExpr)
    return expr.size
end

JuMP.variable_ref_type(::Type{GenericMatrixExpr{V}}) where {V} = V
# Expose the variable-reference type parameter `V` of an array expression
# type so JuMP's generic machinery can dispatch on it.
function JuMP.variable_ref_type(::Type{GenericArrayExpr{V,N}}) where {V,N}
    return V
end

# Mark array expressions as real-valued for JuMP's internal checks
# (needed so they are accepted as arguments of nonlinear expressions).
function JuMP._is_real(::GenericArrayExpr)
    return true
end
34 changes: 34 additions & 0 deletions src/JuMP/operators.jl
Original file line number Diff line number Diff line change
Expand Up @@ -28,3 +28,37 @@ end
# Unary broadcast over a JuMP array: delegate to `_broadcast`, tagging the
# result with the array's variable-reference type.
function Base.broadcasted(op::Function, x::AbstractJuMPArray)
    vtype = JuMP.variable_ref_type(x)
    return _broadcast(vtype, op, x)
end

# Binary broadcast with the JuMP array on the left: the variable-reference
# type comes from `x`.
function Base.broadcasted(op::Function, x::AbstractJuMPArray, y::AbstractArray)
    vtype = JuMP.variable_ref_type(x)
    return _broadcast(vtype, op, x, y)
end

# Binary broadcast with the JuMP array on the right: the variable-reference
# type comes from `y`.
function Base.broadcasted(op::Function, x::AbstractArray, y::AbstractJuMPArray)
    vtype = JuMP.variable_ref_type(y)
    return _broadcast(vtype, op, x, y)
end

# Both operands are JuMP arrays; the left operand supplies the
# variable-reference type (both are expected to agree).
function Base.broadcasted(
    op::Function,
    x::AbstractJuMPArray,
    y::AbstractJuMPArray,
)
    vtype = JuMP.variable_ref_type(x)
    return _broadcast(vtype, op, x, y)
end

import LinearAlgebra

# Wrap a JuMP array in a scalar `:norm` nonlinear expression, parameterized
# by the array's variable-reference type.
function _array_norm(x::AbstractJuMPArray)
    return JuMP.GenericNonlinearExpr{JuMP.variable_ref_type(x)}(:norm, Any[x])
end

# Define norm for each concrete AbstractJuMPArray subtype to avoid
# ambiguity with JuMP's error-throwing
# LinearAlgebra.norm(::AbstractArray{<:AbstractJuMPScalar})
# Concrete-type method: avoids ambiguity with JuMP's error-throwing
# `LinearAlgebra.norm(::AbstractArray{<:AbstractJuMPScalar})` fallback.
LinearAlgebra.norm(x::GenericArrayExpr) = _array_norm(x)

# Concrete-type method for variable containers, for the same
# ambiguity-avoidance reason as the `GenericArrayExpr` method.
LinearAlgebra.norm(x::ArrayOfVariables) = _array_norm(x)
62 changes: 62 additions & 0 deletions test/JuMP.jl
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@ using Test

using JuMP
using ArrayDiff
import LinearAlgebra

function runtests()
for name in names(@__MODULE__; all = true)
Expand Down Expand Up @@ -54,6 +55,67 @@ function test_neural()
return
end

function test_binary_broadcasting()
    n = 2
    model = Model()
    @variable(model, W[1:n, 1:n], container = ArrayDiff.ArrayOfVariables)
    Y = rand(n, n)
    # Local helper: assert that `expr` is a broadcasted elementwise
    # subtraction with exactly the given left/right operands.
    function check_sub(expr, lhs, rhs)
        @test expr isa ArrayDiff.MatrixExpr
        @test expr.head == :-
        @test expr.broadcasted
        @test expr.args[1] === lhs
        @test expr.args[2] === rhs
        return
    end
    # JuMP array on the left, numeric matrix on the right.
    d = W .- Y
    check_sub(d, W, Y)
    @test size(d) == (n, n)
    # Numeric matrix on the left, JuMP array on the right.
    check_sub(Y .- W, Y, W)
    # Both operands are JuMP arrays.
    @variable(model, V[1:n, 1:n], container = ArrayDiff.ArrayOfVariables)
    check_sub(W .- V, W, V)
    return
end

function test_norm()
    dim = 2
    model = Model()
    @variable(model, W[1:dim, 1:dim], container = ArrayDiff.ArrayOfVariables)
    # `norm` of a variable container must produce a scalar nonlinear
    # expression wrapping the container itself.
    expr = LinearAlgebra.norm(W)
    @test expr isa JuMP.NonlinearExpr
    @test expr.head == :norm
    @test length(expr.args) == 1
    @test expr.args[1] === W
    return
end

function test_l2_loss()
    n = 2
    X, Y = rand(n, n), rand(n, n)
    model = Model()
    @variable(model, W1[1:n, 1:n], container = ArrayDiff.ArrayOfVariables)
    @variable(model, W2[1:n, 1:n], container = ArrayDiff.ArrayOfVariables)
    # Forward pass of a one-hidden-layer network with tanh activation.
    prediction = W2 * tanh.(W1 * X)
    # The residual must be recorded as a broadcasted subtraction.
    residual = prediction .- Y
    @test residual isa ArrayDiff.MatrixExpr
    @test residual.head == :-
    @test residual.broadcasted
    @test residual.args[1] === prediction
    @test residual.args[2] === Y
    # Taking the norm of the residual yields a scalar nonlinear expression.
    loss = LinearAlgebra.norm(residual)
    @test loss isa JuMP.NonlinearExpr
    @test loss.head == :norm
    @test loss.args[1] === residual
    return
end

end # module

TestJuMP.runtests()
Loading