some new features

ilgazca
2025-07-30 17:09:11 +03:00
parent db5d46760a
commit 8019bd3b7c
20616 changed files with 4375466 additions and 8 deletions


@@ -0,0 +1,71 @@
"""
Linear Solvers
==============
The default solver is SuperLU (included in the scipy distribution),
which can solve real or complex linear systems in both single and
double precisions. It is automatically replaced by UMFPACK, if
available. Note that UMFPACK works in double precision only, so
switch it off by::
>>> from scipy.sparse.linalg import spsolve, use_solver
>>> use_solver(useUmfpack=False)
to solve in single precision. See also the ``use_solver`` documentation.
Example session::
>>> from scipy.sparse import csc_matrix, spdiags
>>> from numpy import array
>>>
>>> print("Inverting a sparse linear system:")
>>> print("The sparse matrix (constructed from diagonals):")
>>> a = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5)
>>> b = array([1, 2, 3, 4, 5])
>>> print("Solve: single precision complex:")
>>> use_solver( useUmfpack = False )
>>> a = a.astype('F')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a@x-b)
>>>
>>> print("Solve: double precision complex:")
>>> use_solver( useUmfpack = True )
>>> a = a.astype('D')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a@x-b)
>>>
>>> print("Solve: double precision:")
>>> a = a.astype('d')
>>> x = spsolve(a, b)
>>> print(x)
>>> print("Error: ", a@x-b)
>>>
>>> print("Solve: single precision:")
>>> use_solver( useUmfpack = False )
>>> a = a.astype('f')
>>> x = spsolve(a, b.astype('f'))
>>> print(x)
>>> print("Error: ", a@x-b)
"""
#import umfpack
#__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) )
#del umfpack
from .linsolve import *
from ._superlu import SuperLU
from . import _add_newdocs
from . import linsolve
__all__ = [
'MatrixRankWarning', 'SuperLU', 'factorized',
'spilu', 'splu', 'spsolve',
'spsolve_triangular', 'use_solver'
]
from scipy._lib._testutils import PytestTester
test = PytestTester(__name__)
del PytestTester


@@ -0,0 +1,153 @@
from numpy.lib import add_newdoc
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU',
"""
LU factorization of a sparse matrix.
Factorization is represented as::
Pr @ A @ Pc = L @ U
To construct these `SuperLU` objects, call the `splu` and `spilu`
functions.
Attributes
----------
shape
nnz
perm_c
perm_r
L
U
Methods
-------
solve
Notes
-----
.. versionadded:: 0.14.0
Examples
--------
The LU decomposition can be used to solve matrix equations. Consider:
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import splu
>>> A = csc_matrix([[1,2,0,4], [1,0,0,1], [1,0,2,1], [2,2,1,0.]])
This can be solved for a given right-hand side:
>>> lu = splu(A)
>>> b = np.array([1, 2, 3, 4])
>>> x = lu.solve(b)
>>> A.dot(x)
array([ 1., 2., 3., 4.])
The ``lu`` object also contains an explicit representation of the
decomposition. The permutations are represented as mappings of
indices:
>>> lu.perm_r
array([2, 1, 3, 0], dtype=int32) # may vary
>>> lu.perm_c
array([0, 1, 3, 2], dtype=int32) # may vary
The L and U factors are sparse matrices in CSC format:
>>> lu.L.toarray()
array([[ 1. , 0. , 0. , 0. ], # may vary
[ 0.5, 1. , 0. , 0. ],
[ 0.5, -1. , 1. , 0. ],
[ 0.5, 1. , 0. , 1. ]])
>>> lu.U.toarray()
array([[ 2. , 2. , 0. , 1. ], # may vary
[ 0. , -1. , 1. , -0.5],
[ 0. , 0. , 5. , -1. ],
[ 0. , 0. , 0. , 2. ]])
The permutation matrices can be constructed:
>>> Pr = csc_matrix((np.ones(4), (lu.perm_r, np.arange(4))))
>>> Pc = csc_matrix((np.ones(4), (np.arange(4), lu.perm_c)))
We can reassemble the original matrix:
>>> (Pr.T @ (lu.L @ lu.U) @ Pc.T).toarray()
array([[ 1., 2., 0., 4.],
[ 1., 0., 0., 1.],
[ 1., 0., 2., 1.],
[ 2., 2., 1., 0.]])
""")
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('solve',
"""
solve(rhs[, trans])
Solves a linear system of equations with one or several right-hand sides.
Parameters
----------
rhs : ndarray, shape (n,) or (n, k)
Right hand side(s) of equation
trans : {'N', 'T', 'H'}, optional
Type of system to solve::
'N': A @ x == rhs (default)
'T': A^T @ x == rhs
'H': A^H @ x == rhs
i.e., normal, transposed, and hermitian conjugate.
Returns
-------
x : ndarray, shape ``rhs.shape``
Solution vector(s)
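Examples
--------
A minimal sketch (values chosen only for illustration) of solving the
forward and transposed systems with the same factorization:
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import splu
>>> A = csc_matrix([[4., 1.], [0., 3.]])
>>> lu = splu(A)
>>> b = np.array([1., 2.])
>>> np.allclose(A @ lu.solve(b), b)
True
>>> np.allclose(A.T @ lu.solve(b, 'T'), b)
True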
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('L',
"""
Lower triangular factor with unit diagonal as a
`scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('U',
"""
Upper triangular factor as a `scipy.sparse.csc_matrix`.
.. versionadded:: 0.14.0
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('shape',
"""
Shape of the original matrix as a tuple of ints.
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('nnz',
"""
Number of nonzero elements in the matrix.
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_c',
"""
Permutation Pc represented as an array of indices.
The column permutation matrix can be reconstructed via:
>>> Pc = np.zeros((n, n))
>>> Pc[np.arange(n), perm_c] = 1
"""))
add_newdoc('scipy.sparse.linalg._dsolve._superlu', 'SuperLU', ('perm_r',
"""
Permutation Pr represented as an array of indices.
The row permutation matrix can be reconstructed via:
>>> Pr = np.zeros((n, n))
>>> Pr[perm_r, np.arange(n)] = 1
"""))


@@ -0,0 +1,742 @@
from warnings import warn, catch_warnings, simplefilter
import numpy as np
from numpy import asarray
from scipy.sparse import (issparse,
SparseEfficiencyWarning, csc_matrix, eye, diags)
from scipy.sparse._sputils import is_pydata_spmatrix, convert_pydata_sparse_to_scipy
from scipy.linalg import LinAlgError
import copy
from . import _superlu
noScikit = False
try:
import scikits.umfpack as umfpack
except ImportError:
noScikit = True
useUmfpack = not noScikit
__all__ = ['use_solver', 'spsolve', 'splu', 'spilu', 'factorized',
'MatrixRankWarning', 'spsolve_triangular']
class MatrixRankWarning(UserWarning):
pass
def use_solver(**kwargs):
"""
Select default sparse direct solver to be used.
Parameters
----------
useUmfpack : bool, optional
Use UMFPACK [1]_, [2]_, [3]_, [4]_ over SuperLU. Has effect only
if ``scikits.umfpack`` is installed. Default: True
assumeSortedIndices : bool, optional
Allow UMFPACK to skip the step of sorting indices for a CSR/CSC matrix.
Has effect only if useUmfpack is True and ``scikits.umfpack`` is
installed. Default: False
Notes
-----
The default sparse solver is UMFPACK when available
(``scikits.umfpack`` is installed). This can be changed by passing
useUmfpack = False, which then causes the always present SuperLU
based solver to be used.
UMFPACK requires a CSR/CSC matrix to have sorted column/row indices. If
sure that the matrix fulfills this, pass ``assumeSortedIndices=True``
to gain some speed.
References
----------
.. [1] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
multifrontal method with a column pre-ordering strategy, ACM
Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
https://dl.acm.org/doi/abs/10.1145/992200.992206
.. [2] T. A. Davis, A column pre-ordering strategy for the
unsymmetric-pattern multifrontal method, ACM Trans.
on Mathematical Software, 30(2), 2004, pp. 165--195.
https://dl.acm.org/doi/abs/10.1145/992200.992205
.. [3] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
method for unsymmetric sparse matrices, ACM Trans. on
Mathematical Software, 25(1), 1999, pp. 1--19.
https://doi.org/10.1145/305658.287640
.. [4] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
method for sparse LU factorization, SIAM J. Matrix Analysis and
Applications, 18(1), 1997, pp. 140--158.
https://doi.org/10.1137/S0895479894246905
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import use_solver, spsolve
>>> from scipy.sparse import csc_matrix
>>> R = np.random.randn(5, 5)
>>> A = csc_matrix(R)
>>> b = np.random.randn(5)
>>> use_solver(useUmfpack=False) # enforce superLU over UMFPACK
>>> x = spsolve(A, b)
>>> np.allclose(A.dot(x), b)
True
>>> use_solver(useUmfpack=True) # reset umfPack usage to default
"""
if 'useUmfpack' in kwargs:
globals()['useUmfpack'] = kwargs['useUmfpack']
if useUmfpack and 'assumeSortedIndices' in kwargs:
umfpack.configure(assumeSortedIndices=kwargs['assumeSortedIndices'])
def _get_umf_family(A):
"""Get umfpack family string given the sparse matrix dtype."""
_families = {
(np.float64, np.int32): 'di',
(np.complex128, np.int32): 'zi',
(np.float64, np.int64): 'dl',
(np.complex128, np.int64): 'zl'
}
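# The family string encodes the value dtype ('d' = float64, 'z' = complex128)
# followed by the index dtype ('i' = int32, 'l' = int64).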
# A.dtype.name can only be "float64" or
# "complex128" in control flow
f_type = getattr(np, A.dtype.name)
# control flow may allow for more index
# types to get through here
i_type = getattr(np, A.indices.dtype.name)
try:
family = _families[(f_type, i_type)]
except KeyError as e:
msg = ('only float64 or complex128 matrices with int32 or int64 '
f'indices are supported! (got: matrix: {f_type}, indices: {i_type})')
raise ValueError(msg) from e
# See gh-8278. Considered converting only if
# A.shape[0]*A.shape[1] > np.iinfo(np.int32).max,
# but that didn't always fix the issue.
family = family[0] + "l"
A_new = copy.copy(A)
A_new.indptr = np.asarray(A.indptr, dtype=np.int64)
A_new.indices = np.asarray(A.indices, dtype=np.int64)
return family, A_new
def _safe_downcast_indices(A):
# check for safe downcasting
max_value = np.iinfo(np.intc).max
if A.indptr[-1] > max_value: # indptr[-1] is max b/c indptr always sorted
raise ValueError("indptr values too large for SuperLU")
if max(*A.shape) > max_value: # only check large enough arrays
if np.any(A.indices > max_value):
raise ValueError("indices values too large for SuperLU")
indices = A.indices.astype(np.intc, copy=False)
indptr = A.indptr.astype(np.intc, copy=False)
return indices, indptr
def spsolve(A, b, permc_spec=None, use_umfpack=True):
"""Solve the sparse linear system Ax=b, where b may be a vector or a matrix.
Parameters
----------
A : ndarray or sparse matrix
The square matrix A will be converted into CSC or CSR form
b : ndarray or sparse matrix
The matrix or vector representing the right hand side of the equation.
If a vector, b.shape must be (n,) or (n, 1).
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering [1]_, [2]_.
use_umfpack : bool, optional
if True (default) then use UMFPACK for the solution [3]_, [4]_, [5]_,
[6]_. This is only referenced if b is a vector and
``scikits.umfpack`` is installed.
Returns
-------
x : ndarray or sparse matrix
the solution of the sparse linear equation.
If b is a vector, then x is a vector of size A.shape[1]
If b is a matrix, then x is a matrix of size (A.shape[1], b.shape[1])
Notes
-----
For solving the matrix expression AX = B, this solver assumes the resulting
matrix X is sparse, as is often the case for very sparse inputs. If the
resulting X is dense, the construction of this sparse result will be
relatively expensive. In that case, consider converting A to a dense
matrix and using scipy.linalg.solve or its variants.
References
----------
.. [1] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, Algorithm 836:
COLAMD, an approximate column minimum degree ordering algorithm,
ACM Trans. on Mathematical Software, 30(3), 2004, pp. 377--380.
:doi:`10.1145/1024074.1024080`
.. [2] T. A. Davis, J. R. Gilbert, S. Larimore, E. Ng, A column approximate
minimum degree ordering algorithm, ACM Trans. on Mathematical
Software, 30(3), 2004, pp. 353--376. :doi:`10.1145/1024074.1024079`
.. [3] T. A. Davis, Algorithm 832: UMFPACK - an unsymmetric-pattern
multifrontal method with a column pre-ordering strategy, ACM
Trans. on Mathematical Software, 30(2), 2004, pp. 196--199.
https://dl.acm.org/doi/abs/10.1145/992200.992206
.. [4] T. A. Davis, A column pre-ordering strategy for the
unsymmetric-pattern multifrontal method, ACM Trans.
on Mathematical Software, 30(2), 2004, pp. 165--195.
https://dl.acm.org/doi/abs/10.1145/992200.992205
.. [5] T. A. Davis and I. S. Duff, A combined unifrontal/multifrontal
method for unsymmetric sparse matrices, ACM Trans. on
Mathematical Software, 25(1), 1999, pp. 1--19.
https://doi.org/10.1145/305658.287640
.. [6] T. A. Davis and I. S. Duff, An unsymmetric-pattern multifrontal
method for sparse LU factorization, SIAM J. Matrix Analysis and
Applications, 18(1), 1997, pp. 140--158.
https://doi.org/10.1137/S0895479894246905
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import spsolve
>>> A = csc_matrix([[3, 2, 0], [1, -1, 0], [0, 5, 1]], dtype=float)
>>> B = csc_matrix([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve(A, B)
>>> np.allclose(A.dot(x).toarray(), B.toarray())
True
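When ``b`` is a dense vector the result is returned as a dense 1-D array
(a small sketch reusing ``A`` from above):
>>> b = np.array([2., -1., 2.])
>>> x = spsolve(A, b)
>>> np.allclose(A.dot(x), b)
True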
"""
is_pydata_sparse = is_pydata_spmatrix(b)
pydata_sparse_cls = b.__class__ if is_pydata_sparse else None
A = convert_pydata_sparse_to_scipy(A)
b = convert_pydata_sparse_to_scipy(b)
if not (issparse(A) and A.format in ("csc", "csr")):
A = csc_matrix(A)
warn('spsolve requires A be CSC or CSR matrix format',
SparseEfficiencyWarning, stacklevel=2)
# b is a vector only if b has shape (n,) or (n, 1)
b_is_sparse = issparse(b)
if not b_is_sparse:
b = asarray(b)
b_is_vector = ((b.ndim == 1) or (b.ndim == 2 and b.shape[1] == 1))
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
result_dtype = np.promote_types(A.dtype, b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
# validate input shapes
M, N = A.shape
if (M != N):
raise ValueError(f"matrix must be square (has shape {(M, N)})")
if M != b.shape[0]:
raise ValueError(f"matrix - rhs dimension mismatch ({A.shape} - {b.shape[0]})")
use_umfpack = use_umfpack and useUmfpack
if b_is_vector and use_umfpack:
if b_is_sparse:
b_vec = b.toarray()
else:
b_vec = b
b_vec = asarray(b_vec, dtype=A.dtype).ravel()
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
x = umf.linsolve(umfpack.UMFPACK_A, A, b_vec,
autoTranspose=True)
else:
if b_is_vector and b_is_sparse:
b = b.toarray()
b_is_sparse = False
if not b_is_sparse:
if A.format == "csc":
flag = 1 # CSC format
else:
flag = 0 # CSR format
indices = A.indices.astype(np.intc, copy=False)
indptr = A.indptr.astype(np.intc, copy=False)
options = dict(ColPerm=permc_spec)
x, info = _superlu.gssv(N, A.nnz, A.data, indices, indptr,
b, flag, options=options)
if info != 0:
warn("Matrix is exactly singular", MatrixRankWarning, stacklevel=2)
x.fill(np.nan)
if b_is_vector:
x = x.ravel()
else:
# b is sparse
Afactsolve = factorized(A)
if not (b.format == "csc" or is_pydata_spmatrix(b)):
warn('spsolve is more efficient when sparse b '
'is in the CSC matrix format',
SparseEfficiencyWarning, stacklevel=2)
b = csc_matrix(b)
# Create a sparse output matrix by repeatedly applying
# the sparse factorization to solve columns of b.
data_segs = []
row_segs = []
col_segs = []
for j in range(b.shape[1]):
# TODO: replace this with
# bj = b[:, j].toarray().ravel()
# once 1D sparse arrays are supported.
# That is a slightly faster code path.
bj = b[:, [j]].toarray().ravel()
xj = Afactsolve(bj)
w = np.flatnonzero(xj)
segment_length = w.shape[0]
row_segs.append(w)
col_segs.append(np.full(segment_length, j, dtype=int))
data_segs.append(np.asarray(xj[w], dtype=A.dtype))
sparse_data = np.concatenate(data_segs)
sparse_row = np.concatenate(row_segs)
sparse_col = np.concatenate(col_segs)
x = A.__class__((sparse_data, (sparse_row, sparse_col)),
shape=b.shape, dtype=A.dtype)
if is_pydata_sparse:
x = pydata_sparse_cls.from_scipy_sparse(x)
return x
def splu(A, permc_spec=None, diag_pivot_thresh=None,
relax=None, panel_size=None, options=dict()):
"""
Compute the LU decomposition of a sparse, square matrix.
Parameters
----------
A : sparse matrix
Sparse matrix to factorize. Most efficient when provided in CSC
format. Other formats will be converted to CSC before factorization.
permc_spec : str, optional
How to permute the columns of the matrix for sparsity preservation.
(default: 'COLAMD')
- ``NATURAL``: natural ordering.
- ``MMD_ATA``: minimum degree ordering on the structure of A^T A.
- ``MMD_AT_PLUS_A``: minimum degree ordering on the structure of A^T+A.
- ``COLAMD``: approximate minimum degree column ordering
diag_pivot_thresh : float, optional
Threshold used for a diagonal entry to be an acceptable pivot.
See SuperLU user's guide for details [1]_
relax : int, optional
Expert option for customizing the degree of relaxing supernodes.
See SuperLU user's guide for details [1]_
panel_size : int, optional
Expert option for customizing the panel size.
See SuperLU user's guide for details [1]_
options : dict, optional
Dictionary containing additional expert options to SuperLU.
See SuperLU user guide [1]_ (section 2.4 on the 'Options' argument)
for more details. For example, you can specify
``options=dict(Equil=False, IterRefine='SINGLE')``
to turn equilibration off and perform a single iterative refinement.
Returns
-------
invA : scipy.sparse.linalg.SuperLU
Object with a ``solve`` method.
See also
--------
spilu : incomplete LU decomposition
Notes
-----
This function uses the SuperLU library.
References
----------
.. [1] SuperLU https://portal.nersc.gov/project/sparse/superlu/
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import splu
>>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = splu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
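The ``options`` dictionary described above can be used to tweak the
factorization (a minimal sketch; for this small, well-conditioned matrix
the solution is unchanged):
>>> lu = splu(A, options=dict(Equil=False))
>>> np.allclose(A.dot(lu.solve(x)), x)
True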
"""
if is_pydata_spmatrix(A):
def csc_construct_func(*a, cls=type(A)):
return cls.from_scipy_sparse(csc_matrix(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_matrix
if not (issparse(A) and A.format == "csc"):
A = csc_matrix(A)
warn('splu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
indices, indptr = _safe_downcast_indices(A)
_options = dict(DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
csc_construct_func=csc_construct_func,
ilu=False, options=_options)
def spilu(A, drop_tol=None, fill_factor=None, drop_rule=None, permc_spec=None,
diag_pivot_thresh=None, relax=None, panel_size=None, options=None):
"""
Compute an incomplete LU decomposition for a sparse, square matrix.
The resulting object is an approximation to the inverse of `A`.
Parameters
----------
A : (N, N) array_like
Sparse matrix to factorize. Most efficient when provided in CSC format.
Other formats will be converted to CSC before factorization.
drop_tol : float, optional
Drop tolerance (0 <= tol <= 1) for an incomplete LU decomposition.
(default: 1e-4)
fill_factor : float, optional
Specifies the fill ratio upper bound (>= 1.0) for ILU. (default: 10)
drop_rule : str, optional
Comma-separated string of drop rules to use.
Available rules: ``basic``, ``prows``, ``column``, ``area``,
``secondary``, ``dynamic``, ``interp``. (Default: ``basic,area``)
See SuperLU documentation for details.
Remaining options
Same as for `splu`.
Returns
-------
invA_approx : scipy.sparse.linalg.SuperLU
Object with a ``solve`` method.
See also
--------
splu : complete LU decomposition
Notes
-----
To get a better approximation of the inverse, you may need to
increase `fill_factor` AND decrease `drop_tol`.
This function uses the SuperLU library.
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_matrix
>>> from scipy.sparse.linalg import spilu
>>> A = csc_matrix([[1., 0., 0.], [5., 0., 2.], [0., -1., 0.]], dtype=float)
>>> B = spilu(A)
>>> x = np.array([1., 2., 3.], dtype=float)
>>> B.solve(x)
array([ 1. , -3. , -1.5])
>>> A.dot(B.solve(x))
array([ 1., 2., 3.])
>>> B.solve(A.dot(x))
array([ 1., 2., 3.])
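A common use of the incomplete factorization is as a preconditioner for an
iterative solver (a minimal sketch reusing ``A`` and ``B`` from above):
>>> from scipy.sparse.linalg import LinearOperator, gmres
>>> M = LinearOperator(A.shape, B.solve)
>>> x_approx, info = gmres(A, np.array([1., 2., 3.]), M=M)
>>> info
0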
"""
if is_pydata_spmatrix(A):
def csc_construct_func(*a, cls=type(A)):
return cls.from_scipy_sparse(csc_matrix(*a))
A = A.to_scipy_sparse().tocsc()
else:
csc_construct_func = csc_matrix
if not (issparse(A) and A.format == "csc"):
A = csc_matrix(A)
warn('spilu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
# sum duplicates for non-canonical format
A.sum_duplicates()
A = A._asfptype() # upcast to a floating point format
M, N = A.shape
if (M != N):
raise ValueError("can only factor square matrices") # is this true?
indices, indptr = _safe_downcast_indices(A)
_options = dict(ILU_DropRule=drop_rule, ILU_DropTol=drop_tol,
ILU_FillFactor=fill_factor,
DiagPivotThresh=diag_pivot_thresh, ColPerm=permc_spec,
PanelSize=panel_size, Relax=relax)
if options is not None:
_options.update(options)
# Ensure that no column permutations are applied
if (_options["ColPerm"] == "NATURAL"):
_options["SymmetricMode"] = True
return _superlu.gstrf(N, A.nnz, A.data, indices, indptr,
csc_construct_func=csc_construct_func,
ilu=True, options=_options)
def factorized(A):
"""
Return a function for solving a sparse linear system, with A pre-factorized.
Parameters
----------
A : (N, N) array_like
Input. A in CSC format is most efficient. A CSR format matrix will
be converted to CSC before factorization.
Returns
-------
solve : callable
To solve the linear system of equations given in `A`, the `solve`
callable should be passed an ndarray of shape (N,).
Examples
--------
>>> import numpy as np
>>> from scipy.sparse.linalg import factorized
>>> from scipy.sparse import csc_matrix
>>> A = np.array([[ 3. , 2. , -1. ],
... [ 2. , -2. , 4. ],
... [-1. , 0.5, -1. ]])
>>> solve = factorized(csc_matrix(A)) # Makes LU decomposition.
>>> rhs1 = np.array([1, -2, 0])
>>> solve(rhs1) # Uses the LU factors.
array([ 1., -2., -2.])
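The same factorization can be reused for further right-hand sides without
refactorizing (a minimal sketch):
>>> rhs2 = np.array([0., 1., 0.])
>>> np.allclose(A @ solve(rhs2), rhs2)
True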
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
if useUmfpack:
if noScikit:
raise RuntimeError('Scikits.umfpack not installed.')
if not (issparse(A) and A.format == "csc"):
A = csc_matrix(A)
warn('splu converted its input to CSC format',
SparseEfficiencyWarning, stacklevel=2)
A = A._asfptype() # upcast to a floating point format
if A.dtype.char not in 'dD':
raise ValueError("convert matrix data to double, please, using"
" .astype(), or set linsolve.useUmfpack = False")
umf_family, A = _get_umf_family(A)
umf = umfpack.UmfpackContext(umf_family)
# Make LU decomposition.
umf.numeric(A)
def solve(b):
with np.errstate(divide="ignore", invalid="ignore"):
# Ignoring warnings with numpy >= 1.23.0, see gh-16523
result = umf.solve(umfpack.UMFPACK_A, A, b, autoTranspose=True)
return result
return solve
else:
return splu(A).solve
def spsolve_triangular(A, b, lower=True, overwrite_A=False, overwrite_b=False,
unit_diagonal=False):
"""
Solve the equation ``A x = b`` for `x`, assuming A is a triangular matrix.
Parameters
----------
A : (M, M) sparse matrix
A sparse square triangular matrix. Should be in CSR or CSC format.
b : (M,) or (M, N) array_like
Right-hand side matrix in ``A x = b``
lower : bool, optional
Whether `A` is a lower or upper triangular matrix.
Default is lower triangular matrix.
overwrite_A : bool, optional
Allow changing `A`.
Enabling gives a performance gain. Default is False.
overwrite_b : bool, optional
Allow overwriting data in `b`.
Enabling gives a performance gain. Default is False.
If `overwrite_b` is True, it should be ensured that
`b` has an appropriate dtype to be able to store the result.
unit_diagonal : bool, optional
If True, diagonal elements of `A` are assumed to be 1.
.. versionadded:: 1.4.0
Returns
-------
x : (M,) or (M, N) ndarray
Solution to the system ``A x = b``. Shape of return matches shape
of `b`.
Raises
------
LinAlgError
If `A` is singular or not triangular.
ValueError
If shape of `A` or shape of `b` do not match the requirements.
Notes
-----
.. versionadded:: 0.19.0
Examples
--------
>>> import numpy as np
>>> from scipy.sparse import csc_array
>>> from scipy.sparse.linalg import spsolve_triangular
>>> A = csc_array([[3, 0, 0], [1, -1, 0], [2, 0, 1]], dtype=float)
>>> B = np.array([[2, 0], [-1, 0], [2, 0]], dtype=float)
>>> x = spsolve_triangular(A, B)
>>> np.allclose(A.dot(x), B)
True
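With ``unit_diagonal=True`` the stored diagonal entries are ignored and
treated as ones (a minimal sketch reusing ``A`` and ``B`` from above):
>>> x = spsolve_triangular(A, B, unit_diagonal=True)
>>> A.setdiag(1)
>>> np.allclose(A.dot(x), B)
True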
"""
if is_pydata_spmatrix(A):
A = A.to_scipy_sparse().tocsc()
trans = "N"
if issparse(A) and A.format == "csr":
A = A.T
trans = "T"
lower = not lower
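# A CSR input is handled by working with its transpose (which is CSC) and
# asking the SuperLU back end to solve the transposed system, so only the
# CSC code path below is needed; the lower/upper flag flips accordingly.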
if not (issparse(A) and A.format == "csc"):
warn('CSC or CSR matrix format is required. Converting to CSC matrix.',
SparseEfficiencyWarning, stacklevel=2)
A = csc_matrix(A)
elif not overwrite_A:
A = A.copy()
M, N = A.shape
if M != N:
raise ValueError(
f'A must be a square matrix but its shape is {A.shape}.')
if unit_diagonal:
with catch_warnings():
simplefilter('ignore', SparseEfficiencyWarning)
A.setdiag(1)
else:
diag = A.diagonal()
if np.any(diag == 0):
raise LinAlgError(
'A is singular: zero entry on diagonal.')
invdiag = 1/diag
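# Scale the matrix so that the triangular factor passed to the SuperLU
# routine has a unit diagonal; the solution is multiplied by ``invdiag``
# again at the end of this function to undo the scaling.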
if trans == "N":
A = A @ diags(invdiag)
else:
A = (A.T @ diags(invdiag)).T
# sum duplicates for non-canonical format
A.sum_duplicates()
b = np.asanyarray(b)
if b.ndim not in [1, 2]:
raise ValueError(
f'b must have 1 or 2 dims but its shape is {b.shape}.')
if M != b.shape[0]:
raise ValueError(
'The size of the dimensions of A must be equal to '
'the size of the first dimension of b but the shape of A is '
f'{A.shape} and the shape of b is {b.shape}.'
)
result_dtype = np.promote_types(np.promote_types(A.dtype, np.float32), b.dtype)
if A.dtype != result_dtype:
A = A.astype(result_dtype)
if b.dtype != result_dtype:
b = b.astype(result_dtype)
elif not overwrite_b:
b = b.copy()
if lower:
L = A
U = csc_matrix((N, N), dtype=result_dtype)
else:
L = eye(N, dtype=result_dtype, format='csc')
U = A
U.setdiag(0)
x, info = _superlu.gstrs(trans,
N, L.nnz, L.data, L.indices, L.indptr,
N, U.nnz, U.data, U.indices, U.indptr,
b)
if info:
raise LinAlgError('A is singular.')
if not unit_diagonal:
invdiag = invdiag.reshape(-1, *([1] * (len(x.shape) - 1)))
x = x * invdiag
return x


@@ -0,0 +1,883 @@
import sys
import threading
import numpy as np
from numpy import array, finfo, arange, eye, all, unique, ones, dot
import numpy.random as random
from numpy.testing import (
assert_array_almost_equal, assert_almost_equal,
assert_equal, assert_array_equal, assert_, assert_allclose,
assert_warns, suppress_warnings)
import pytest
from pytest import raises as assert_raises
import scipy.linalg
from scipy.linalg import norm, inv
from scipy.sparse import (spdiags, SparseEfficiencyWarning, csc_matrix,
csr_matrix, identity, issparse, dok_matrix, lil_matrix, bsr_matrix)
from scipy.sparse.linalg import SuperLU
from scipy.sparse.linalg._dsolve import (spsolve, use_solver, splu, spilu,
MatrixRankWarning, _superlu, spsolve_triangular, factorized)
import scipy.sparse
from scipy._lib._testutils import check_free_memory
from scipy._lib._util import ComplexWarning
sup_sparse_efficiency = suppress_warnings()
sup_sparse_efficiency.filter(SparseEfficiencyWarning)
# scikits.umfpack is not a SciPy dependency but it is optionally used in
# dsolve, so check whether it's available
try:
import scikits.umfpack as umfpack
has_umfpack = True
except ImportError:
has_umfpack = False
def toarray(a):
if issparse(a):
return a.toarray()
else:
return a
def setup_bug_8278():
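# Build the 3-D finite-difference Laplacian on an (N-1)**3 grid as a
# Kronecker sum of 1-D second-difference matrices, plus a random
# right-hand side, reproducing the large system from gh-8278.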
N = 2 ** 6
h = 1/N
Ah1D = scipy.sparse.diags([-1, 2, -1], [-1, 0, 1],
shape=(N-1, N-1))/(h**2)
eyeN = scipy.sparse.eye(N - 1)
A = (scipy.sparse.kron(eyeN, scipy.sparse.kron(eyeN, Ah1D))
+ scipy.sparse.kron(eyeN, scipy.sparse.kron(Ah1D, eyeN))
+ scipy.sparse.kron(Ah1D, scipy.sparse.kron(eyeN, eyeN)))
b = np.random.rand((N-1)**3)
return A, b
class TestFactorized:
def setup_method(self):
n = 5
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n).tocsc()
random.seed(1234)
def _check_singular(self):
A = csc_matrix((5,5), dtype='d')
b = ones(5)
assert_array_almost_equal(0. * b, factorized(A)(b))
def _check_non_singular(self):
# Make a diagonal dominant, to make sure it is not singular
n = 5
a = csc_matrix(random.rand(n, n))
b = ones(n)
expected = splu(a).solve(b)
assert_array_almost_equal(factorized(a)(b), expected)
def test_singular_without_umfpack(self):
use_solver(useUmfpack=False)
with assert_raises(RuntimeError, match="Factor is exactly singular"):
self._check_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_singular_with_umfpack(self):
use_solver(useUmfpack=True)
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "divide by zero encountered in double_scalars")
assert_warns(umfpack.UmfpackWarning, self._check_singular)
def test_non_singular_without_umfpack(self):
use_solver(useUmfpack=False)
self._check_non_singular()
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_non_singular_with_umfpack(self):
use_solver(useUmfpack=True)
self._check_non_singular()
def test_cannot_factorize_nonsquare_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
msg = "can only factor square matrices"
with assert_raises(ValueError, match=msg):
factorized(self.A[:, :4])
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_factorizes_nonsquare_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
# does not raise
factorized(self.A[:,:4])
def test_call_with_incorrectly_sized_matrix_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
with assert_raises(ValueError, match="is of incompatible size"):
solve(b)
with assert_raises(ValueError, match="is of incompatible size"):
solve(B)
with assert_raises(ValueError,
match="object too deep for desired array"):
solve(BB)
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_incorrectly_sized_matrix_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
B = random.rand(4, 3)
BB = random.rand(self.n, 3, 9)
# does not raise
solve(b)
msg = "object too deep for desired array"
with assert_raises(ValueError, match=msg):
solve(B)
with assert_raises(ValueError, match=msg):
solve(BB)
def test_call_with_cast_to_complex_without_umfpack(self):
use_solver(useUmfpack=False)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
with assert_raises(TypeError, match="Cannot cast array data"):
solve(b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_call_with_cast_to_complex_with_umfpack(self):
use_solver(useUmfpack=True)
solve = factorized(self.A)
b = random.rand(4)
for t in [np.complex64, np.complex128]:
assert_warns(ComplexWarning, solve, b.astype(t))
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_assume_sorted_indices_flag(self):
# a sparse matrix with unsorted indices
unsorted_inds = np.array([2, 0, 1, 0])
data = np.array([10, 16, 5, 0.4])
indptr = np.array([0, 1, 2, 4])
A = csc_matrix((data, unsorted_inds, indptr), (3, 3))
b = ones(3)
# should raise when incorrectly assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=True)
with assert_raises(RuntimeError,
match="UMFPACK_ERROR_invalid_matrix"):
factorized(A)
# should sort indices and succeed when not assuming indices are sorted
use_solver(useUmfpack=True, assumeSortedIndices=False)
expected = splu(A.copy()).solve(b)
assert_equal(A.has_sorted_indices, 0)
assert_array_almost_equal(factorized(A)(b), expected)
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
A = A.tocsc()
f = factorized(A)
x = f(b)
assert_array_almost_equal(A @ x, b)
class TestLinsolve:
def setup_method(self):
use_solver(useUmfpack=False)
def test_singular(self):
A = csc_matrix((5,5), dtype='d')
b = array([1, 2, 3, 4, 5],dtype='d')
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert_(not np.isfinite(x).any())
def test_singular_gh_3312(self):
# "Bad" test case that leads SuperLU to call LAPACK with invalid
# arguments. Check that it fails moderately gracefully.
ij = np.array([(17, 0), (17, 6), (17, 12), (10, 13)], dtype=np.int32)
v = np.array([0.284213, 0.94933781, 0.15767017, 0.38797296])
A = csc_matrix((v, ij.T), shape=(20, 20))
b = np.arange(20)
try:
# should either raise a runtime error or return value
# appropriate for singular input (which yields the warning)
with suppress_warnings() as sup:
sup.filter(MatrixRankWarning, "Matrix is exactly singular")
x = spsolve(A, b)
assert not np.isfinite(x).any()
except RuntimeError:
pass
@pytest.mark.parametrize('format', ['csc', 'csr'])
@pytest.mark.parametrize('idx_dtype', [np.int32, np.int64])
def test_twodiags(self, format: str, idx_dtype: np.dtype):
A = spdiags([[1, 2, 3, 4, 5], [6, 5, 8, 9, 10]], [0, 1], 5, 5,
format=format)
b = array([1, 2, 3, 4, 5])
# condition number of A
cond_A = norm(A.toarray(), 2) * norm(inv(A.toarray()), 2)
for t in ['f','d','F','D']:
eps = finfo(t).eps # floating point epsilon
b = b.astype(t)
Asp = A.astype(t)
Asp.indices = Asp.indices.astype(idx_dtype, copy=False)
Asp.indptr = Asp.indptr.astype(idx_dtype, copy=False)
x = spsolve(Asp, b)
assert_(norm(b - Asp@x) < 10 * cond_A * eps)
def test_bvector_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3)
b = As@x
x2 = spsolve(As, b)
assert_array_almost_equal(x, x2)
def test_bmatrix_smoketest(self):
Adense = array([[0., 1., 1.],
[1., 0., 1.],
[0., 0., 1.]])
As = csc_matrix(Adense)
random.seed(1234)
x = random.randn(3, 4)
Bdense = As.dot(x)
Bs = csc_matrix(Bdense)
x2 = spsolve(As, Bs)
assert_array_almost_equal(x, x2.toarray())
@sup_sparse_efficiency
def test_non_square(self):
# A is not square.
A = ones((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve, A, b)
# A2 and b2 have incompatible shapes.
A2 = csc_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve, A2, b2)
@sup_sparse_efficiency
def test_example_comparison(self):
row = array([0,0,1,2,2,2])
col = array([0,2,2,0,1,2])
data = array([1,2,3,-4,5,6])
sM = csr_matrix((data,(row,col)), shape=(3,3), dtype=float)
M = sM.toarray()
row = array([0,0,1,1,0,0])
col = array([0,2,1,1,0,0])
data = array([1,1,1,1,1,1])
sN = csr_matrix((data, (row,col)), shape=(3,3), dtype=float)
N = sN.toarray()
sX = spsolve(sM, sN)
X = scipy.linalg.solve(M, N)
assert_array_almost_equal(X, sX.toarray())
@sup_sparse_efficiency
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_shape_compatibility(self):
use_solver(useUmfpack=True)
A = csc_matrix([[1., 0], [0, 2]])
bs = [
[1, 6],
array([1, 6]),
[[1], [6]],
array([[1], [6]]),
csc_matrix([[1], [6]]),
csr_matrix([[1], [6]]),
dok_matrix([[1], [6]]),
bsr_matrix([[1], [6]]),
array([[1., 2., 3.], [6., 8., 10.]]),
csc_matrix([[1., 2., 3.], [6., 8., 10.]]),
csr_matrix([[1., 2., 3.], [6., 8., 10.]]),
dok_matrix([[1., 2., 3.], [6., 8., 10.]]),
bsr_matrix([[1., 2., 3.], [6., 8., 10.]]),
]
for b in bs:
x = np.linalg.solve(A.toarray(), toarray(b))
for spmattype in [csc_matrix, csr_matrix, dok_matrix, lil_matrix]:
x1 = spsolve(spmattype(A), b, use_umfpack=True)
x2 = spsolve(spmattype(A), b, use_umfpack=False)
# check solution
if x.ndim == 2 and x.shape[1] == 1:
# interprets also these as "vectors"
x = x.ravel()
assert_array_almost_equal(toarray(x1), x,
err_msg=repr((b, spmattype, 1)))
assert_array_almost_equal(toarray(x2), x,
err_msg=repr((b, spmattype, 2)))
# dense vs. sparse output ("vectors" are always dense)
if issparse(b) and x.ndim > 1:
assert_(issparse(x1), repr((b, spmattype, 1)))
assert_(issparse(x2), repr((b, spmattype, 2)))
else:
assert_(isinstance(x1, np.ndarray), repr((b, spmattype, 1)))
assert_(isinstance(x2, np.ndarray), repr((b, spmattype, 2)))
# check output shape
if x.ndim == 1:
# "vector"
assert_equal(x1.shape, (A.shape[1],))
assert_equal(x2.shape, (A.shape[1],))
else:
# "matrix"
assert_equal(x1.shape, x.shape)
assert_equal(x2.shape, x.shape)
A = csc_matrix((3, 3))
b = csc_matrix((1, 3))
assert_raises(ValueError, spsolve, A, b)
@sup_sparse_efficiency
def test_ndarray_support(self):
A = array([[1., 2.], [2., 0.]])
x = array([[1., 1.], [0.5, -0.5]])
b = array([[2., 0.], [2., 2.]])
assert_array_almost_equal(x, spsolve(A, b))
def test_gssv_badinput(self):
N = 10
d = arange(N) + 1.0
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), N, N)
for spmatrix in (csc_matrix, csr_matrix):
A = spmatrix(A)
b = np.arange(N)
def not_c_contig(x):
return x.repeat(2)[::2]
def not_1dim(x):
return x[:,None]
def bad_type(x):
return x.astype(bool)
def too_short(x):
return x[:-1]
badops = [not_c_contig, not_1dim, bad_type, too_short]
for badop in badops:
msg = f"{spmatrix!r} {badop!r}"
# Not C-contiguous
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, badop(A.data), A.indices, A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, badop(A.indices), A.indptr,
b, int(spmatrix == csc_matrix), err_msg=msg)
assert_raises((ValueError, TypeError), _superlu.gssv,
N, A.nnz, A.data, A.indices, badop(A.indptr),
b, int(spmatrix == csc_matrix), err_msg=msg)
def test_sparsity_preservation(self):
ident = csc_matrix([
[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
b = csc_matrix([
[0, 1],
[1, 0],
[0, 0]])
x = spsolve(ident, b)
assert_equal(ident.nnz, 3)
assert_equal(b.nnz, 2)
assert_equal(x.nnz, 2)
assert_allclose(x.toarray(), b.toarray(), atol=1e-12, rtol=1e-12)
def test_dtype_cast(self):
A_real = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5]])
A_complex = scipy.sparse.csr_matrix([[1, 2, 0],
[0, 0, 3],
[4, 0, 5 + 1j]])
b_real = np.array([1,1,1])
b_complex = np.array([1,1,1]) + 1j*np.array([1,1,1])
x = spsolve(A_real, b_real)
assert_(np.issubdtype(x.dtype, np.floating))
x = spsolve(A_real, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_real)
assert_(np.issubdtype(x.dtype, np.complexfloating))
x = spsolve(A_complex, b_complex)
assert_(np.issubdtype(x.dtype, np.complexfloating))
@pytest.mark.slow
@pytest.mark.skipif(not has_umfpack, reason="umfpack not available")
def test_bug_8278(self):
check_free_memory(8000)
use_solver(useUmfpack=True)
A, b = setup_bug_8278()
x = spsolve(A, b)
assert_array_almost_equal(A @ x, b)
class TestSplu:
def setup_method(self):
use_solver(useUmfpack=False)
n = 40
d = arange(n) + 1
self.n = n
self.A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n, format='csc')
random.seed(1234)
def _smoketest(self, spxlu, check, dtype, idx_dtype):
if np.issubdtype(dtype, np.complexfloating):
A = self.A + 1j*self.A.T
else:
A = self.A
A = A.astype(dtype)
A.indices = A.indices.astype(idx_dtype, copy=False)
A.indptr = A.indptr.astype(idx_dtype, copy=False)
lu = spxlu(A)
rng = random.RandomState(1234)
# Input shapes
for k in [None, 1, 2, self.n, self.n+2]:
msg = f"k={k!r}"
if k is None:
b = rng.rand(self.n)
else:
b = rng.rand(self.n, k)
if np.issubdtype(dtype, np.complexfloating):
b = b + 1j*rng.rand(*b.shape)
b = b.astype(dtype)
x = lu.solve(b)
check(A, b, x, msg)
x = lu.solve(b, 'T')
check(A.T, b, x, msg)
x = lu.solve(b, 'H')
check(A.T.conj(), b, x, msg)
@sup_sparse_efficiency
def test_splu_smoketest(self):
self._internal_test_splu_smoketest()
def _internal_test_splu_smoketest(self):
# Check that splu works at all
def check(A, b, x, msg=""):
eps = np.finfo(A.dtype).eps
r = A @ x
assert_(abs(r - b).max() < 1e3*eps, msg)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
for idx_dtype in [np.int32, np.int64]:
self._smoketest(splu, check, dtype, idx_dtype)
@sup_sparse_efficiency
def test_spilu_smoketest(self):
self._internal_test_spilu_smoketest()
def _internal_test_spilu_smoketest(self):
errors = []
def check(A, b, x, msg=""):
r = A @ x
err = abs(r - b).max()
assert_(err < 1e-2, msg)
if b.dtype in (np.float64, np.complex128):
errors.append(err)
for dtype in [np.float32, np.float64, np.complex64, np.complex128]:
for idx_dtype in [np.int32, np.int64]:
self._smoketest(spilu, check, dtype, idx_dtype)
assert_(max(errors) > 1e-5)
@sup_sparse_efficiency
def test_spilu_drop_rule(self):
# Test passing in the drop_rule argument to spilu.
A = identity(2)
rules = [
b'basic,area'.decode('ascii'), # unicode
b'basic,area', # ascii
[b'basic', b'area'.decode('ascii')]
]
for rule in rules:
# Argument should be accepted
assert_(isinstance(spilu(A, drop_rule=rule), SuperLU))
def test_splu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, splu, A)
def test_spilu_nnz0(self):
A = csc_matrix((5,5), dtype='d')
assert_raises(RuntimeError, spilu, A)
def test_splu_basic(self):
# Test basic splu functionality.
n = 30
rng = random.RandomState(12)
a = rng.rand(n, n)
a[a < 0.95] = 0
# First test with a singular matrix
a[:, 0] = 0
a_ = csc_matrix(a)
# Matrix is exactly singular
assert_raises(RuntimeError, splu, a_)
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
b = ones(n)
x = lu.solve(b)
assert_almost_equal(dot(a, x), b)
def test_splu_perm(self):
# Test the permutation vectors exposed by splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# Check that the permutation indices do belong to [0, n-1].
for perm in (lu.perm_r, lu.perm_c):
assert_(all(perm > -1))
assert_(all(perm < n))
assert_equal(len(unique(perm)), len(perm))
# Now make a symmetric, and test that the two permutation vectors are
# the same
# Note: a += a.T relies on undefined behavior.
a = a + a.T
a_ = csc_matrix(a)
lu = splu(a_)
assert_array_equal(lu.perm_r, lu.perm_c)
@pytest.mark.parametrize("splu_fun, rtol", [(splu, 1e-7), (spilu, 1e-1)])
def test_natural_permc(self, splu_fun, rtol):
# Test that the "NATURAL" permc_spec does not permute the matrix
np.random.seed(42)
n = 500
p = 0.01
A = scipy.sparse.random(n, n, p)
x = np.random.rand(n)
# Make A diagonal dominant to make sure it is not singular
A += (n+1)*scipy.sparse.identity(n)
A_ = csc_matrix(A)
b = A_ @ x
# without permc_spec, permutation is not identity
lu = splu_fun(A_)
assert_(np.any(lu.perm_c != np.arange(n)))
# with permc_spec="NATURAL", permutation is identity
lu = splu_fun(A_, permc_spec="NATURAL")
assert_array_equal(lu.perm_c, np.arange(n))
# Also, lu decomposition is valid
x2 = lu.solve(b)
assert_allclose(x, x2, rtol=rtol)
@pytest.mark.skipif(not hasattr(sys, 'getrefcount'), reason="no sys.getrefcount")
def test_lu_refcount(self):
# Test that we are keeping track of the reference count with splu.
n = 30
a = random.random((n, n))
a[a < 0.95] = 0
# Make a diagonal dominant, to make sure it is not singular
a += 4*eye(n)
a_ = csc_matrix(a)
lu = splu(a_)
# And now test that we don't have a refcount bug
rc = sys.getrefcount(lu)
for attr in ('perm_r', 'perm_c'):
perm = getattr(lu, attr)
assert_equal(sys.getrefcount(lu), rc + 1)
del perm
assert_equal(sys.getrefcount(lu), rc)
def test_bad_inputs(self):
A = self.A.tocsc()
assert_raises(ValueError, splu, A[:,:4])
assert_raises(ValueError, spilu, A[:,:4])
for lu in [splu(A), spilu(A)]:
b = random.rand(42)
B = random.rand(42, 3)
BB = random.rand(self.n, 3, 9)
assert_raises(ValueError, lu.solve, b)
assert_raises(ValueError, lu.solve, B)
assert_raises(ValueError, lu.solve, BB)
assert_raises(TypeError, lu.solve,
b.astype(np.complex64))
assert_raises(TypeError, lu.solve,
b.astype(np.complex128))
@sup_sparse_efficiency
def test_superlu_dlamch_i386_nan(self):
# SuperLU 4.3 calls some functions returning floats without
# declaring them. On i386@linux call convention, this fails to
# clear floating point registers after call. As a result, NaN
# can appear in the next floating point operation made.
#
# Here's a test case that triggered the issue.
n = 8
d = np.arange(n) + 1
A = spdiags((d, 2*d, d[::-1]), (-3, 0, 5), n, n)
A = A.astype(np.float32)
spilu(A)
A = A + 1j*A
B = A.toarray()
assert_(not np.isnan(B).any())
@sup_sparse_efficiency
def test_lu_attr(self):
def check(dtype, complex_2=False):
A = self.A.astype(dtype)
if complex_2:
A = A + 1j*A.T
n = A.shape[0]
lu = splu(A)
# Check that the decomposition is as advertised
Pc = np.zeros((n, n))
Pc[np.arange(n), lu.perm_c] = 1
Pr = np.zeros((n, n))
Pr[lu.perm_r, np.arange(n)] = 1
Ad = A.toarray()
lhs = Pr.dot(Ad).dot(Pc)
rhs = (lu.L @ lu.U).toarray()
eps = np.finfo(dtype).eps
assert_allclose(lhs, rhs, atol=100*eps)
check(np.float32)
check(np.float64)
check(np.complex64)
check(np.complex128)
check(np.complex64, True)
check(np.complex128, True)
@pytest.mark.slow
@sup_sparse_efficiency
def test_threads_parallel(self):
oks = []
def worker():
try:
self.test_splu_basic()
self._internal_test_splu_smoketest()
self._internal_test_spilu_smoketest()
oks.append(True)
except Exception:
pass
threads = [threading.Thread(target=worker)
for k in range(20)]
for t in threads:
t.start()
for t in threads:
t.join()
assert_equal(len(oks), 20)
def test_singular_matrix(self):
# Test that SuperLU does not print to stdout when a singular matrix is
# passed. See gh-20993.
A = identity(10, format='csr').tocsr()
A[-1, -1] = 0
b = np.zeros(10)
with pytest.warns(MatrixRankWarning):
res = spsolve(A, b)
assert np.isnan(res).all()
class TestGstrsErrors:
def setup_method(self):
self.A = array([[1.0,2.0,3.0],[4.0,5.0,6.0],[7.0,8.0,9.0]], dtype=np.float64)
self.b = np.array([[1.0],[2.0],[3.0]], dtype=np.float64)
def test_trans(self):
L = scipy.sparse.tril(self.A, format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(ValueError, match="trans must be N, T, or H"):
_superlu.gstrs('X', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr, self.b)
def test_shape_LU(self):
L = scipy.sparse.tril(self.A[0:2,0:2], format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(ValueError, match="L and U must have the same dimension"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr, self.b)
def test_shape_b(self):
L = scipy.sparse.tril(self.A, format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(ValueError, match="right hand side array has invalid shape"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr,
self.b[0:2])
def test_types_differ(self):
L = scipy.sparse.tril(self.A.astype(np.float32), format='csc')
U = scipy.sparse.triu(self.A, k=1, format='csc')
with assert_raises(TypeError, match="nzvals types of L and U differ"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr, self.b)
def test_types_unsupported(self):
L = scipy.sparse.tril(self.A.astype(np.uint8), format='csc')
U = scipy.sparse.triu(self.A.astype(np.uint8), k=1, format='csc')
with assert_raises(TypeError, match="nzvals is not of a type supported"):
_superlu.gstrs('N', L.shape[0], L.nnz, L.data, L.indices, L.indptr,
U.shape[0], U.nnz, U.data, U.indices, U.indptr,
self.b.astype(np.uint8))
class TestSpsolveTriangular:
def setup_method(self):
use_solver(useUmfpack=False)
@pytest.mark.parametrize("fmt",["csr","csc"])
def test_zero_diagonal(self,fmt):
n = 5
rng = np.random.default_rng(43876432987)
A = rng.standard_normal((n, n))
b = np.arange(n)
A = scipy.sparse.tril(A, k=0, format=fmt)
x = spsolve_triangular(A, b, unit_diagonal=True, lower=True)
A.setdiag(1)
assert_allclose(A.dot(x), b)
# Regression test from gh-15199
A = np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0]], dtype=np.float64)
b = np.array([1., 2., 3.])
with suppress_warnings() as sup:
sup.filter(SparseEfficiencyWarning, "CSC or CSR matrix format is")
spsolve_triangular(A, b, unit_diagonal=True)
@pytest.mark.parametrize("fmt",["csr","csc"])
def test_singular(self,fmt):
n = 5
if fmt == "csr":
A = csr_matrix((n, n))
else:
A = csc_matrix((n, n))
b = np.arange(n)
for lower in (True, False):
assert_raises(scipy.linalg.LinAlgError,
spsolve_triangular, A, b, lower=lower)
@sup_sparse_efficiency
def test_bad_shape(self):
# A is not square.
A = np.zeros((3, 4))
b = ones((4, 1))
assert_raises(ValueError, spsolve_triangular, A, b)
# A2 and b2 have incompatible shapes.
A2 = csr_matrix(eye(3))
b2 = array([1.0, 2.0])
assert_raises(ValueError, spsolve_triangular, A2, b2)
@sup_sparse_efficiency
def test_input_types(self):
A = array([[1., 0.], [1., 2.]])
b = array([[2., 0.], [2., 2.]])
for matrix_type in (array, csc_matrix, csr_matrix):
x = spsolve_triangular(matrix_type(A), b, lower=True)
assert_array_almost_equal(A.dot(x), b)
@pytest.mark.slow
@sup_sparse_efficiency
@pytest.mark.parametrize("n", [10, 10**2, 10**3])
@pytest.mark.parametrize("m", [1, 10])
@pytest.mark.parametrize("lower", [True, False])
@pytest.mark.parametrize("format", ["csr", "csc"])
@pytest.mark.parametrize("unit_diagonal", [False, True])
@pytest.mark.parametrize("choice_of_A", ["real", "complex"])
@pytest.mark.parametrize("choice_of_b", ["floats", "ints", "complexints"])
def test_random(self, n, m, lower, format, unit_diagonal, choice_of_A, choice_of_b):
def random_triangle_matrix(n, lower=True, format="csr", choice_of_A="real"):
if choice_of_A == "real":
dtype = np.float64
elif choice_of_A == "complex":
dtype = np.complex128
else:
raise ValueError("choice_of_A must be 'real' or 'complex'.")
rng = np.random.default_rng(789002319)
rvs = rng.random
A = scipy.sparse.random(n, n, density=0.1, format='lil', dtype=dtype,
random_state=rng, data_rvs=rvs)
if lower:
A = scipy.sparse.tril(A, format="lil")
else:
A = scipy.sparse.triu(A, format="lil")
for i in range(n):
A[i, i] = np.random.rand() + 1
if format == "csc":
A = A.tocsc(copy=False)
else:
A = A.tocsr(copy=False)
return A
np.random.seed(1234)
A = random_triangle_matrix(n, lower=lower)
if choice_of_b == "floats":
b = np.random.rand(n, m)
elif choice_of_b == "ints":
b = np.random.randint(-9, 9, (n, m))
elif choice_of_b == "complexints":
b = np.random.randint(-9, 9, (n, m)) + np.random.randint(-9, 9, (n, m)) * 1j
else:
raise ValueError(
"choice_of_b must be 'floats', 'ints', or 'complexints'.")
x = spsolve_triangular(A, b, lower=lower, unit_diagonal=unit_diagonal)
if unit_diagonal:
A.setdiag(1)
assert_allclose(A.dot(x), b, atol=1.5e-6)