Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
59ce6bd
Speed up `np.add.reduce`
Zeroto521 Jan 16, 2026
1c66a5c
BUG: `MatrixExpr.mean(axis=1)` will crash the kernel
Zeroto521 Jan 16, 2026
a2a57c1
Add tests for matrix mean performance and type
Zeroto521 Jan 16, 2026
7804c10
Update changelog for MatrixExpr.add.reduce optimization
Zeroto521 Jan 16, 2026
4a8a233
Remove MatrixExpr.sum method and update _core_sum docstring
Zeroto521 Jan 16, 2026
377ec6a
Remove sum method from MatrixExpr stub
Zeroto521 Jan 16, 2026
1f406a4
Improve MatrixExpr ufunc handling and remove unused include
Zeroto521 Jan 16, 2026
a933366
Fix MatrixExpr matmul return type and update tests
Zeroto521 Jan 16, 2026
5cc69d2
Refactor matrix sum performance tests timing logic
Zeroto521 Jan 16, 2026
efc36b0
Update matmul return type assertion in test
Zeroto521 Jan 16, 2026
b0d190a
Refactor matrix variable tests to use view casting
Zeroto521 Jan 16, 2026
103a96f
define the variable
Zeroto521 Jan 16, 2026
e0b8746
Refactor __array_ufunc__ in MatrixExpr for clarity
Zeroto521 Jan 18, 2026
bc98e43
Add type hints and docstring to __array_ufunc__ in MatrixExpr
Zeroto521 Jan 19, 2026
aad49c1
Merge branch 'master' into issue/1156
Zeroto521 Jan 19, 2026
311c62d
Add comment clarifying 'reduce' method handling
Zeroto521 Jan 19, 2026
3896151
Merge branch 'master' into issue/1156
Zeroto521 Jan 20, 2026
cc5294d
Refactor matrix sum test for clarity and type consistency
Zeroto521 Jan 20, 2026
6d9cbc4
Fix handling of 'out' kwarg in __array_ufunc__
Zeroto521 Jan 20, 2026
4c29df8
Fix comments to reference mean instead of sum
Zeroto521 Jan 20, 2026
f29ff13
Pass 'False' to _ensure_array
Zeroto521 Jan 20, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
1 change: 1 addition & 0 deletions CHANGELOG.md
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
- Fixed incorrect getVal() result when _bestSol.sol was outdated
### Changed
- changed default value of enablepricing flag to True
- Speed up MatrixExpr.add.reduce via quicksum
- Speed up np.ndarray(..., dtype=np.float64) @ MatrixExpr
- Minimum numpy version increased from 1.16.0 to 1.19.0
### Removed
Expand Down
132 changes: 71 additions & 61 deletions src/pyscipopt/matrix.pxi
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@
# TODO Cythonize things. Improve performance.
# TODO Add tests
"""

from typing import Literal, Optional, Tuple, Union
import numpy as np
try:
Expand Down Expand Up @@ -71,7 +70,7 @@ class MatrixExpr(np.ndarray):
The ufunc object that was called.

method : {"__call__", "reduce", "reduceat", "accumulate", "outer", "at"}
A string indicating which UFunc method was called.
A string indicating which ufunc method was called.

*args : tuple
The input arguments to the ufunc.
Expand All @@ -81,77 +80,28 @@ class MatrixExpr(np.ndarray):

Returns
-------
Expr, GenExpr, MatrixExpr
Expr, MatrixExpr
The result of the ufunc operation is wrapped back into a MatrixExpr if
applicable.

"""
res = NotImplemented
# Unboxing MatrixExpr to stop __array_ufunc__ recursion
args = tuple(_ensure_array(arg) for arg in args)
if method == "reduce": # Handle reduction operations, e.g., np.sum(a)
if ufunc is np.add:
res = _core_sum(args[0], **kwargs)

if method == "__call__": # Standard ufunc call, e.g., np.add(a, b)
if ufunc in {np.matmul, np.dot}:
res = _core_dot(_ensure_array(args[0]), _ensure_array(args[1]))
res = _core_dot(args[0], args[1])

if res is NotImplemented:
# Unboxing MatrixExpr to stop __array_ufunc__ recursion
args = tuple(_ensure_array(arg) for arg in args)
if "out" in kwargs: # Unboxing MatrixExpr to stop __array_ufunc__ recursion
kwargs["out"] = tuple(_ensure_array(arg, False) for arg in kwargs["out"])
res = super().__array_ufunc__(ufunc, method, *args, **kwargs)
return res.view(MatrixExpr) if isinstance(res, np.ndarray) else res

def sum(
self,
axis: Optional[Union[int, Tuple[int, ...]]] = None,
keepdims: bool = False,
**kwargs,
) -> Union[Expr, MatrixExpr]:
"""
Return the sum of the array elements over the given axis.

Parameters
----------
axis : None or int or tuple of ints, optional
Axis or axes along which a sum is performed. The default, axis=None, will
sum all of the elements of the input array. If axis is negative it counts
from the last to the first axis. If axis is a tuple of ints, a sum is
performed on all of the axes specified in the tuple instead of a single axis
or all the axes as before.

keepdims : bool, optional
If this is set to True, the axes which are reduced are left in the result as
dimensions with size one. With this option, the result will broadcast
correctly against the input array.

**kwargs : ignored
Additional keyword arguments are ignored. They exist for compatibility
with `numpy.ndarray.sum`.

Returns
-------
Expr or MatrixExpr
If the sum is performed over all axes, return an Expr, otherwise return
a MatrixExpr.

"""
axis: Tuple[int, ...] = normalize_axis_tuple(
range(self.ndim) if axis is None else axis, self.ndim
)
if len(axis) == self.ndim:
res = quicksum(self.flat)
return (
np.array([res], dtype=object).reshape([1] * self.ndim).view(MatrixExpr)
if keepdims
else res
)

keep_axes = tuple(i for i in range(self.ndim) if i not in axis)
shape = (
tuple(1 if i in axis else self.shape[i] for i in range(self.ndim))
if keepdims
else tuple(self.shape[i] for i in keep_axes)
)
return np.apply_along_axis(
quicksum, -1, self.transpose(keep_axes + axis).reshape(shape + (-1,))
).view(MatrixExpr)

def __le__(self, other: Union[float, int, "Expr", np.ndarray, "MatrixExpr"]) -> MatrixExprCons:
return _matrixexpr_richcmp(self, other, 1)

Expand Down Expand Up @@ -195,6 +145,7 @@ class MatrixExpr(np.ndarray):
class MatrixGenExpr(MatrixExpr):
pass


class MatrixExprCons(np.ndarray):

def __le__(self, other: Union[float, int, np.ndarray]) -> MatrixExprCons:
Expand Down Expand Up @@ -288,3 +239,62 @@ def _core_dot_2d(cnp.ndarray a, cnp.ndarray x) -> np.ndarray:
res[i, j] = quicksum(a_view[i, idx] * x[idx, j] for idx in nonzero)

return res


def _core_sum(
    cnp.ndarray a,
    axis: Optional[Union[int, Tuple[int, ...]]] = None,
    keepdims: bool = False,
    **kwargs,
) -> Union[Expr, np.ndarray]:
    """
    Return the sum of the array elements over the given axis.

    Parameters
    ----------
    a : cnp.ndarray
        A `np.ndarray` of type `object` and containing `Expr` objects.

    axis : None or int or tuple of ints, optional
        Axis or axes along which a sum is performed. The default, axis=None, will
        sum all of the elements of the input array. If axis is negative it counts
        from the last to the first axis. If axis is a tuple of ints, a sum is
        performed on all of the axes specified in the tuple instead of a single axis
        or all the axes as before.

    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the result as
        dimensions with size one. With this option, the result will broadcast
        correctly against the input array.

    **kwargs : ignored
        Additional keyword arguments are ignored. They exist for compatibility
        with `numpy.ndarray.sum`.

    Returns
    -------
    Expr or np.ndarray
        If the sum is performed over all axes, return an Expr, otherwise return
        a np.ndarray.

    """
    # Canonicalize `axis` into a tuple of non-negative ints; None means "all axes".
    # normalize_axis_tuple also validates the axes against a.ndim.
    axis: Tuple[int, ...] = normalize_axis_tuple(
        range(a.ndim) if axis is None else axis, a.ndim
    )
    if len(axis) == a.ndim:
        # Full reduction: a single quicksum over every element yields one Expr.
        res = quicksum(a.flat)
        return (
            # With keepdims, wrap the scalar Expr in a (1, 1, ..., 1) object array.
            np.array([res], dtype=object).reshape([1] * a.ndim)
            if keepdims
            else res
        )

    # Partial reduction: move the kept axes to the front and the reduced axes to
    # the back, collapse the reduced axes into one trailing axis, then quicksum
    # along that last axis.
    keep_axes = tuple(i for i in range(a.ndim) if i not in axis)
    shape = (
        # keepdims: reduced axes become size 1; otherwise they are dropped.
        tuple(1 if i in axis else a.shape[i] for i in range(a.ndim))
        if keepdims
        else tuple(a.shape[i] for i in keep_axes)
    )
    return np.apply_along_axis(
        quicksum, -1, a.transpose(keep_axes + axis).reshape(shape + (-1,))
    )
3 changes: 0 additions & 3 deletions src/pyscipopt/scip.pyi
Original file line number Diff line number Diff line change
Expand Up @@ -509,9 +509,6 @@ class MatrixConstraint(numpy.ndarray):
def isStickingAtNode(self) -> Incomplete: ...

class MatrixExpr(numpy.ndarray):
def sum( # type: ignore[override]
self, axis: Incomplete = ..., keepdims: Incomplete = ..., **kwargs: Incomplete
) -> Incomplete: ...
def __add__(self, other: Incomplete) -> Incomplete: ...
def __eq__(self, other: Incomplete)-> Incomplete: ...
def __ge__(self, other: Incomplete) -> MatrixExprCons: ...
Expand Down
62 changes: 44 additions & 18 deletions tests/test_matrix_variable.py
Original file line number Diff line number Diff line change
Expand Up @@ -248,49 +248,75 @@ def test_matrix_sum_axis():
)
def test_matrix_sum_result(axis, keepdims):
# directly compare the result of np.sum and MatrixExpr.sum
_getVal = np.vectorize(lambda e: e.terms[CONST])
_getVal = np.vectorize(lambda e: e[CONST])
a = np.arange(6).reshape((1, 2, 3))

np_res = a.sum(axis, keepdims=keepdims)
scip_res = MatrixExpr.sum(a, axis, keepdims=keepdims)
assert (np_res == _getVal(scip_res)).all()
assert np_res.shape == _getVal(scip_res).shape
scip_res = _getVal(a.view(MatrixExpr).sum(axis, keepdims=keepdims)).view(np.ndarray)
assert (np_res == scip_res).all()
assert np_res.shape == scip_res.shape


@pytest.mark.parametrize("n", [50, 100])
def test_matrix_sum_axis_is_none_performance(n):
model = Model()
x = model.addMatrixVar((n, n))

# Original sum via `np.ndarray.sum`, `np.sum` will call subclass method
start_orig = time()
np.ndarray.sum(x)
end_orig = time()
# Original sum via `np.ndarray.sum`
start = time()
x.view(np.ndarray).sum()
orig = time() - start

# Optimized sum via `quicksum`
start_matrix = time()
start = time()
x.sum()
end_matrix = time()
matrix = time() - start

assert model.isGT(end_orig - start_orig, end_matrix - start_matrix)
assert model.isGT(orig, matrix)


@pytest.mark.parametrize("n", [50, 100])
def test_matrix_sum_axis_not_none_performance(n):
model = Model()
x = model.addMatrixVar((n, n))

# Original sum via `np.ndarray.sum`, `np.sum` will call subclass method
start_orig = time()
np.ndarray.sum(x, axis=0)
end_orig = time()
# Original sum via `np.ndarray.sum`
start = time()
x.view(np.ndarray).sum(axis=0)
orig = time() - start

# Optimized sum via `quicksum`
start_matrix = time()
start = time()
x.sum(axis=0)
end_matrix = time()
matrix = time() - start

assert model.isGT(orig, matrix)


@pytest.mark.parametrize("n", [50, 100])
def test_matrix_mean_performance(n):
model = Model()
x = model.addMatrixVar((n, n))

# Original mean via `np.ndarray.mean`
start = time()
x.view(np.ndarray).mean(axis=0)
orig = time() - start

# Optimized mean via `quicksum`
start = time()
x.mean(axis=0)
matrix = time() - start

assert model.isGT(orig, matrix)


def test_matrix_mean():
model = Model()
x = model.addMatrixVar((2, 2))

assert model.isGT(end_orig - start_orig, end_matrix - start_matrix)
assert isinstance(x.mean(), Expr)
assert isinstance(x.mean(1), MatrixExpr)


@pytest.mark.parametrize("n", [50, 100])
Expand Down
Loading