7 changes: 7 additions & 0 deletions doc/library/xtensor/signal.md
@@ -0,0 +1,7 @@
(libdoc_xtensor_signal)=
# `xtensor.signal` -- Signal processing operations

```{eval-rst}
.. automodule:: pytensor.xtensor.signal
:members:
```
1 change: 1 addition & 0 deletions pyproject.toml
@@ -163,6 +163,7 @@ lines-after-imports = 2
"tests/link/numba/**/test_*.py" = ["E402"]
"tests/link/pytorch/**/test_*.py" = ["E402"]
"tests/link/mlx/**/test_*.py" = ["E402"]
"tests/xtensor/**/test_*.py" = ["E402"]



6 changes: 3 additions & 3 deletions pytensor/tensor/elemwise.py
@@ -37,7 +37,7 @@
normalize_reduce_axis,
)
from pytensor.tensor.variable import TensorVariable
from pytensor.utils import uniq
from pytensor.utils import uniq, unzip


class DimShuffle(ExternalCOp):
@@ -765,8 +765,8 @@ def _c_all(self, node, nodename, inames, onames, sub):
# assert that inames and inputs order stay consistent.
# This is to protect against future changes of uniq.
assert len(inames) == len(inputs)
ii, iii = list(
zip(*uniq(list(zip(_inames, node.inputs, strict=True))), strict=True)
ii, iii = unzip(
uniq(list(zip(_inames, node.inputs, strict=True))), n=2, strict=True
)
assert all(x == y for x, y in zip(ii, inames, strict=True))
assert all(x == y for x, y in zip(iii, inputs, strict=True))
3 changes: 2 additions & 1 deletion pytensor/tensor/optimize.py
@@ -35,6 +35,7 @@
from pytensor.tensor.reshape import pack, unpack
from pytensor.tensor.slinalg import solve
from pytensor.tensor.variable import TensorVariable, Variable
from pytensor.utils import unzip


# scipy.optimize can be slow to import, and will not be used by most users
@@ -297,7 +298,7 @@ def compute_implicit_gradients(
# No differentiable arguments, return disconnected gradients
return arg_grads

outer_args_to_diff, df_dthetas = zip(*valid_args_and_grads)
outer_args_to_diff, df_dthetas = unzip(valid_args_and_grads, n=2)

replace = dict(zip(fgraph.inputs, (x_star, *args), strict=True))
df_dx_star, *df_dthetas_stars = graph_replace(
10 changes: 7 additions & 3 deletions pytensor/tensor/rewriting/basic.py
@@ -1165,9 +1165,13 @@ def unconditional_constant_folding(fgraph, node):
)
required = thunk()
except NotImplementedError:
# Not all Ops have a python implementation
thunk = node.op.make_thunk(node, storage_map, compute_map, no_recycling=[])
required = thunk()
try:
# Not all Ops have a python implementation
thunk = node.op.make_thunk(node, storage_map, compute_map, no_recycling=[])
required = thunk()
except NotImplementedError:
# And some Ops (like dummy Ops) can never be evaluated
return None

# A node whose inputs are all provided should always return successfully
assert not required
9 changes: 5 additions & 4 deletions pytensor/tensor/signal/conv.py
@@ -195,7 +195,8 @@ def c_code(self, node, name, inputs, outputs, sub):
return code


blockwise_convolve_1d = Blockwise(Convolve1d())
_convolve_1d = Convolve1d()
_blockwise_convolve_1d = Blockwise(_convolve_1d)


def convolve1d(
@@ -235,14 +236,14 @@ def convolve1d(
zeros_right = (in2.shape[-1] - 1) // 2
in1 = join(
-1,
zeros((*in1_batch_shape, zeros_left), dtype=in2.dtype),
zeros((*in1_batch_shape, zeros_left), dtype=in1.dtype),
in1,
zeros((*in1_batch_shape, zeros_right), dtype=in2.dtype),
zeros((*in1_batch_shape, zeros_right), dtype=in1.dtype),
)
mode = "valid"

full_mode = as_scalar(np.bool_(mode == "full"))
return type_cast(TensorVariable, blockwise_convolve_1d(in1, in2, full_mode))
return type_cast(TensorVariable, _blockwise_convolve_1d(in1, in2, full_mode))


class Convolve2d(AbstractConvolveNd, Op): # type: ignore[misc]
3 changes: 2 additions & 1 deletion pytensor/tensor/slinalg.py
@@ -22,6 +22,7 @@
from pytensor.tensor.blockwise import Blockwise
from pytensor.tensor.type import matrix, tensor, vector
from pytensor.tensor.variable import TensorVariable
from pytensor.utils import unzip


logger = logging.getLogger(__name__)
@@ -1323,7 +1324,7 @@ def grad(self, inputs, gout):
return [gout[0][slc] for slc in slices]

def infer_shape(self, fgraph, nodes, shapes):
first, second = zip(*shapes, strict=True)
first, second = unzip(shapes, n=2, strict=True)
return [(pt.add(*first), pt.add(*second))]

def _validate_and_prepare_inputs(self, matrices, as_tensor_func):
3 changes: 2 additions & 1 deletion pytensor/tensor/subtensor.py
@@ -70,6 +70,7 @@
make_slice,
)
from pytensor.tensor.variable import TensorConstant, TensorVariable
from pytensor.utils import unzip


_logger = logging.getLogger("pytensor.tensor.subtensor")
@@ -650,7 +651,7 @@ def indexed_result_shape(array_shape, indices, indices_are_shapes=False):
)

for basic, grp_dim_indices in idx_groups:
dim_nums, grp_indices = zip(*grp_dim_indices, strict=True)
dim_nums, grp_indices = unzip(grp_dim_indices, n=2, strict=True)
remaining_dims = tuple(dim for dim in remaining_dims if dim not in dim_nums)

if basic:
11 changes: 11 additions & 0 deletions pytensor/utils.py
@@ -338,3 +338,14 @@ def __eq__(self, other):

def __hash__(self):
return hash(type(self))


def unzip(iterable, n: int, strict: bool = False):
"""Unzip a nested iterable, returns n empty tuples if empty.

It can be safely unpacked into n variables.
"""
res = tuple(zip(*iterable, strict=strict))
if not res:
return ((),) * n
return res
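A brief illustration of the edge case this helper exists for (a sketch; `pairs` is just an example value):

```python
from pytensor.utils import unzip

pairs = [("a", 1), ("b", 2)]
letters, numbers = unzip(pairs, n=2, strict=True)
assert letters == ("a", "b")
assert numbers == (1, 2)

# With an empty iterable, unpacking a bare zip() would raise
# "not enough values to unpack"; unzip returns n empty tuples instead.
empty_letters, empty_numbers = unzip([], n=2)
assert empty_letters == () == empty_numbers
```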
2 changes: 1 addition & 1 deletion pytensor/xtensor/__init__.py
@@ -1,7 +1,7 @@
import warnings

import pytensor.xtensor.rewriting
from pytensor.xtensor import linalg, math, random
from pytensor.xtensor import linalg, math, random, signal
from pytensor.xtensor.math import dot
from pytensor.xtensor.shape import broadcast, concat, full_like, ones_like, zeros_like
from pytensor.xtensor.type import (
3 changes: 3 additions & 0 deletions pytensor/xtensor/basic.py
@@ -14,6 +14,9 @@ def perform(self, node, inputs, outputs):
f"xtensor operation {self} must be lowered to equivalent tensor operations"
)

def do_constant_folding(self, fgraph, node):
return False


class XTypeCastOp(TypeCastingOp):
"""Base class for Ops that type cast between TensorType and XTensorType.
46 changes: 6 additions & 40 deletions pytensor/xtensor/shape.py
@@ -1,6 +1,6 @@
import typing
import warnings
from collections.abc import Hashable, Sequence
from collections.abc import Sequence
from types import EllipsisType
from typing import Literal

@@ -12,6 +12,7 @@
from pytensor.tensor.exceptions import NotScalarConstantError
from pytensor.tensor.type import integer_dtypes
from pytensor.tensor.utils import get_static_shape_from_size_variables
from pytensor.utils import unzip
from pytensor.xtensor.basic import XOp
from pytensor.xtensor.math import cast, second
from pytensor.xtensor.type import XTensorVariable, as_xtensor, xtensor
@@ -296,7 +297,7 @@ def make_node(self, *inputs):
if concat_dim not in inp.type.dims:
dims_and_shape[concat_dim] += 1

dims, shape = zip(*dims_and_shape.items())
dims, shape = unzip(dims_and_shape.items(), n=2)
dtype = upcast(*[x.type.dtype for x in inputs])
output = xtensor(dtype=dtype, dims=dims, shape=shape)
return Apply(self, inputs, [output])
@@ -383,28 +384,10 @@ def make_node(self, x):
return Apply(self, [x], [out])


def squeeze(x, dim=None, drop=False, axis=None):
def squeeze(x, dim: str | Sequence[str] | None = None):
"""Remove dimensions of size 1 from an XTensorVariable."""
x = as_xtensor(x)

# drop parameter is ignored in pytensor.xtensor
if drop is not None:
warnings.warn("drop parameter has no effect in pytensor.xtensor", UserWarning)

# dim and axis are mutually exclusive
if dim is not None and axis is not None:
raise ValueError("Cannot specify both `dim` and `axis`")

# if axis is specified, it must be a sequence of ints
if axis is not None:
if not isinstance(axis, Sequence):
axis = [axis]
if not all(isinstance(a, int) for a in axis):
raise ValueError("axis must be an integer or a sequence of integers")

# convert axis to dims
dims = tuple(x.type.dims[i] for i in axis)

# if dim is specified, it must be a string or a sequence of strings
if dim is None:
dims = tuple(d for d, s in zip(x.type.dims, x.type.shape) if s == 1)
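A usage sketch of the simplified signature (dim names and shapes are illustrative, following xarray's `squeeze` semantics):

```python
# x: XTensorVariable with dims ("batch", "time", "unit") and shape (8, 1, 1)
squeeze(x)                        # drops all size-1 dims  -> dims ("batch",)
squeeze(x, dim="time")            # drops only "time"      -> dims ("batch", "unit")
squeeze(x, dim=["time", "unit"])  # drops the named dims   -> dims ("batch",)
```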
@@ -460,33 +443,18 @@ def make_node(self, x, size):
return Apply(self, [x, size], [out])


def expand_dims(x, dim=None, create_index_for_new_dim=None, axis=None, **dim_kwargs):
def expand_dims(x, dim=None, axis=None, **dim_kwargs):
"""Add one or more new dimensions to an XTensorVariable."""
x = as_xtensor(x)

# Store original dimensions for axis handling
original_dims = x.type.dims

# Warn if create_index_for_new_dim is used (not supported)
if create_index_for_new_dim is not None:
warnings.warn(
"create_index_for_new_dim=False has no effect in pytensor.xtensor",
UserWarning,
stacklevel=2,
)

if dim is None:
dim = dim_kwargs
elif dim_kwargs:
raise ValueError("Cannot specify both `dim` and `**dim_kwargs`")

# Check that dim is Hashable or a sequence of Hashable or dict
if not isinstance(dim, Hashable):
if not isinstance(dim, Sequence | dict):
raise TypeError(f"unhashable type: {type(dim).__name__}")
if not all(isinstance(d, Hashable) for d in dim):
raise TypeError(f"unhashable type in {type(dim).__name__}")

# Normalize to a dimension-size mapping
if isinstance(dim, str):
dims_dict = {dim: 1}
@@ -495,9 +463,7 @@ def expand_dims(x, dim=None, create_index_for_new_dim=None, axis=None, **dim_kwargs):
elif isinstance(dim, dict):
dims_dict = {}
for name, val in dim.items():
if isinstance(val, str):
raise TypeError(f"Dimension size cannot be a string: {val}")
if isinstance(val, Sequence | np.ndarray):
if isinstance(val, list | tuple | np.ndarray):
warnings.warn(
"When a sequence is provided as a dimension size, only its length is used. "
"The actual values (which would be coordinates in xarray) are ignored.",
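For reference, the call forms the normalization above accepts (a sketch; `x` is illustrative, and new-dim sizes follow xarray's `expand_dims` semantics):

```python
expand_dims(x, "new")        # one new dim of size 1
expand_dims(x, ["a", "b"])   # several new dims, each of size 1
expand_dims(x, {"a": 3})     # a new dim with an explicit size
expand_dims(x, a=3)          # the same, via **dim_kwargs
```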
74 changes: 74 additions & 0 deletions pytensor/xtensor/signal.py
@@ -0,0 +1,74 @@
from typing import Literal

import numpy as np

from pytensor.scalar import as_scalar
from pytensor.tensor import zeros
from pytensor.tensor.signal.conv import _convolve_1d
from pytensor.xtensor.shape import concat
from pytensor.xtensor.type import as_xtensor
from pytensor.xtensor.vectorization import XBlockwise


def convolve1d(
in1,
in2,
mode: Literal["full", "valid", "same"] = "full",
*,
dims: tuple[str, str],
):
"""Convolve two arrays along a single dimension.

Convolve in1 and in2, with the output size determined by the mode argument.

Parameters
----------
in1 : XTensorVariable
First input.
in2 : XTensorVariable
Second input.
mode : {'full', 'valid', 'same'}, optional
A string indicating the size of the output:
- 'full': The output is the full discrete linear convolution of the inputs, with shape (..., N+M-1,).
- 'valid': The output consists only of elements that do not rely on zero-padding, with shape (..., max(N, M) - min(N, M) + 1,).
- 'same': The output is the same size as in1, centered with respect to the 'full' output.
dims : tuple[str, str]
The dimension along which to convolve each of the inputs. Must be unique to each input.
The left dimension will be present in the output.

Returns
-------
out : XTensorVariable
The discrete linear convolution of in1 with in2.

"""
if len(dims) != 2:
raise ValueError(f"Two dims required, got {dims}")

in1_dim, in2_dim = dims

if in1_dim == in2_dim:
raise ValueError(f"The two dims must be unique, got {dims}")

if mode == "same":
# We implement "same" as "valid" with padded `in1`.
in2_core_size = in2.sizes[in2_dim]
zeros_left = as_xtensor(
zeros(in2_core_size // 2, dtype=in1.dtype), dims=(in1_dim,)
)
zeros_right = as_xtensor(
zeros((in2_core_size - 1) // 2, dtype=in1.dtype), dims=(in1_dim,)
)
in1 = concat([zeros_left, in1, zeros_right], dim=in1_dim)
Member: if you bully me about it i'll make xtensor for pad

Member Author: why concat is so pretty
mode = "valid"
elif mode not in {"full", "valid"}:
raise ValueError(f"mode must be one of 'full', 'valid', or 'same', got {mode}")

full_mode = as_scalar(np.bool_(mode == "full"))

xop = XBlockwise(
Member: I thought you took this away, i misunderstood your comment maybe

Member Author: I took away the XBlockwise argument that says core input dimensions with the same name need not be aligned in length.

That was needed so both in1 and in2 could have the same dim, but need not have the same length. Now that they must use separate dims I didn't need that special argument.

Member Author: What did you think I meant?

_convolve_1d,
core_dims=(((in1_dim,), (in2_dim,), ()), ((in1_dim,),)),
signature=_convolve_1d.gufunc_signature,
)
return xop(in1, in2, full_mode)
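A usage sketch (hypothetical variable names; the `xtensor` factory call follows the pattern used elsewhere in this PR and is an assumption, not part of the diff):

```python
from pytensor.xtensor.signal import convolve1d
from pytensor.xtensor.type import xtensor

# A batch of signals convolved with a shared kernel.
x = xtensor("x", dims=("batch", "time"), shape=(8, 100), dtype="float64")
k = xtensor("k", dims=("lag",), shape=(5,), dtype="float64")

out = convolve1d(x, k, mode="same", dims=("time", "lag"))
# The left dim ("time") survives into the output; "lag" is consumed,
# so out is expected to have dims ("batch", "time").
```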