Coverage for python/src/dolfinx_mpc/utils/test.py: 83% (141 statements)
# Copyright (C) 2021-2022 Jørgen Schartum Dokken and Connor D. Pierce
#
# This file is part of DOLFINX_MPC
#
# SPDX-License-Identifier: MIT
from __future__ import annotations

__all__ = [
    "gather_PETScVector",
    "gather_PETScMatrix",
    "compare_mpc_lhs",
    "compare_mpc_rhs",
    "gather_transformation_matrix",
    "compare_CSR",
]

from typing import Any

from mpi4py import MPI
from petsc4py import PETSc

import dolfinx.common
import numpy as np
import scipy.sparse

import dolfinx_mpc

def _gather_slaves_global(constraint):
    """
    Given a multi-point constraint, return the slave dofs for all processes
    in global dof numbering.
    """
    imap = constraint.function_space.dofmap.index_map
    num_local_slaves = constraint.num_local_slaves
    block_size = constraint.function_space.dofmap.index_map_bs
    _slaves = constraint.slaves
    if num_local_slaves > 0:
        slave_blocks = _slaves[:num_local_slaves] // block_size
        slave_rems = _slaves[:num_local_slaves] % block_size
        glob_slaves = np.asarray(imap.local_to_global(slave_blocks), dtype=np.int64) * block_size + slave_rems
    else:
        glob_slaves = np.array([], dtype=np.int64)

    slaves = np.hstack(MPI.COMM_WORLD.allgather(glob_slaves))
    return slaves
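
# Note on the numbering used above: a local dof `d` in a blocked space splits
# into the block `d // block_size` and the component `d % block_size`, and the
# matching global dof is `local_to_global(block) * block_size + component`.
# A small sketch with hypothetical values (block_size=2):
#
#     local slave dof 7 -> block 3, component 1
#     if local_to_global([3]) == [12], the global slave dof is 12 * 2 + 1 == 25
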
def gather_constants(constraint, root=0):
    """
    Given a multi-point constraint, gather all constants on process 'root'
    """
    imap = constraint.index_map()
    constants = constraint._cpp_object.constants
    l_range = imap.local_range
    ranges = MPI.COMM_WORLD.gather(np.asarray(l_range, dtype=np.int64), root=root)
    g_consts = MPI.COMM_WORLD.gather(constants[: l_range[1] - l_range[0]], root=root)
    if MPI.COMM_WORLD.rank == root:
        block_size = constraint.function_space.dofmap.index_map_bs
        global_consts = np.zeros(imap.size_global * block_size, dtype=constraint.coefficients()[0].dtype)
        for r, vals in zip(ranges, g_consts):
            global_consts[r[0] : r[1]] = vals
        return global_consts
    else:
        return
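
# Sketch of the intended use of `gather_constants` for constraints with
# inhomogeneous (constant) terms, mirroring the commented-out call in
# `compare_mpc_rhs` below (names `K` and `b_org_np` are hypothetical):
#
#     g = gather_constants(constraint, root=root)
#     if MPI.COMM_WORLD.rank == root:
#         reduced_b = np.conj(K.T) @ (b_org_np - g)
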
def gather_transformation_matrix(constraint, root=0):
    """
    Create the transformation matrix K (dim x (dim - len(slaves))) for a given MPC
    and gather it as a scipy CSR matrix on process 'root'.

    Example:

    For dim=3, where u_1 = alpha u_0 + beta u_2:

    Input:
        slaves = [1]
        masters = [0, 2]
        coeffs = [alpha, beta]
        offsets = [0, 2]

    Output:
        K = [[1, 0], [alpha, beta], [0, 1]]
    """
    # Gather slaves from all procs
    V = constraint.V
    imap = constraint.function_space.dofmap.index_map
    block_size = V.dofmap.index_map_bs
    num_local_slaves = constraint.num_local_slaves
    # Gather all global slaves
    slaves = constraint.slaves[:num_local_slaves]
    if num_local_slaves > 0:
        local_blocks = slaves // block_size
        local_rems = slaves % block_size
        glob_slaves = np.asarray(imap.local_to_global(local_blocks), dtype=np.int64) * block_size + local_rems
    else:
        glob_slaves = np.array([], dtype=np.int64)
    all_slaves = np.hstack(MPI.COMM_WORLD.allgather(glob_slaves))
    masters = constraint.masters.array
    master_blocks = masters // block_size
    master_rems = masters % block_size
    coeffs = constraint.coefficients()[0]
    offsets = constraint.masters.offsets
    # Create sparse K matrix
    K_val, rows, cols = [], [], []

    # Add local contributions to K from local slaves
    for slave, global_slave in zip(slaves, glob_slaves):
        masters_index = (
            np.asarray(
                imap.local_to_global(master_blocks[offsets[slave] : offsets[slave + 1]]),
                dtype=np.int64,
            )
            * block_size
            + master_rems[offsets[slave] : offsets[slave + 1]]
        )
        coeffs_index = coeffs[offsets[slave] : offsets[slave + 1]]
        if len(masters_index) > 0:
            for master, coeff in zip(masters_index, coeffs_index):
                # Shift the master column left by the number of slave columns
                # removed before it
                count = sum(master > all_slaves)
                K_val.append(coeff)
                rows.append(global_slave)
                cols.append(master - count)
        else:
            # A slave without masters is a simple equality constraint
            # (Dirichlet BC): keep an identity entry on the shifted diagonal
            K_val.append(1)
            count = sum(global_slave > all_slaves)
            rows.append(global_slave)
            cols.append(global_slave - count)

    # Add identity for all non-slave dofs on the diagonal
    l_range = V.dofmap.index_map.local_range
    global_dofs = np.arange(l_range[0] * block_size, l_range[1] * block_size)
    is_slave = np.isin(global_dofs, glob_slaves)
    for i, dof in enumerate(global_dofs):
        if not is_slave[i]:
            K_val.append(1)
            rows.append(dof)
            cols.append(dof - sum(dof > all_slaves))

    # Gather K to root
    K_vals = MPI.COMM_WORLD.gather(np.asarray(K_val, dtype=coeffs.dtype), root=root)
    rows_g = MPI.COMM_WORLD.gather(np.asarray(rows, dtype=np.int64), root=root)
    cols_g = MPI.COMM_WORLD.gather(np.asarray(cols, dtype=np.int64), root=root)

    if MPI.COMM_WORLD.rank == root:
        K_sparse = scipy.sparse.coo_matrix((np.hstack(K_vals), (np.hstack(rows_g), np.hstack(cols_g)))).tocsr()
        return K_sparse
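
# A minimal serial sketch of the docstring example above (hypothetical values,
# one process): K is built directly from (rows, cols, values) triplets exactly
# as in the loop above, and the dense result is checked:
#
#     alpha, beta = 0.5, 0.25
#     K = scipy.sparse.coo_matrix(
#         ([1.0, alpha, beta, 1.0], ([0, 1, 1, 2], [0, 0, 1, 1])), shape=(3, 2)
#     ).tocsr()
#     assert np.allclose(K.toarray(), [[1, 0], [alpha, beta], [0, 1]])
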
def petsc_to_local_CSR(A: PETSc.Mat, mpc: dolfinx_mpc.MultiPointConstraint):  # type: ignore
    """
    Convert a PETSc matrix to a local scipy CSR matrix, including ghost entries
    """
    global_indices = np.asarray(mpc.function_space.dofmap.index_map.global_indices(), dtype=PETSc.IntType)  # type: ignore
    sort_index = np.argsort(global_indices)
    is_A = PETSc.IS().createGeneral(global_indices[sort_index])  # type: ignore
    A_loc = A.createSubMatrices(is_A)[0]
    ai, aj, av = A_loc.getValuesCSR()
    A_csr = scipy.sparse.csr_matrix((av, aj, ai))
    return A_csr[global_indices[:, None], global_indices]
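
# Usage sketch (hypothetical matrix `A` assembled on an existing `mpc`): each
# rank extracts the rows/columns of its index map (owned plus ghost entries),
# reordered back to the local numbering:
#
#     A_local = petsc_to_local_CSR(A, mpc)
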
def gather_PETScMatrix(A: PETSc.Mat, root=0) -> scipy.sparse.csr_matrix:  # type: ignore
    """
    Given a distributed PETSc matrix, gather it on process 'root' as
    a scipy CSR matrix
    """
    ai, aj, av = A.getValuesCSR()
    aj_all = MPI.COMM_WORLD.gather(aj, root=root)  # type: ignore
    av_all = MPI.COMM_WORLD.gather(av, root=root)  # type: ignore
    ai_all = MPI.COMM_WORLD.gather(ai, root=root)  # type: ignore
    if MPI.COMM_WORLD.rank == root:
        ai_cum = [0]
        # Stitch the per-process row pointers into one global row pointer by
        # shifting each local `ai` by the running nonzero count
        for ai in ai_all:  # type: ignore
            offsets = ai[1:] + ai_cum[-1]
            ai_cum.extend(offsets)
        return scipy.sparse.csr_matrix((np.hstack(av_all), np.hstack(aj_all), ai_cum), shape=A.getSize())  # type: ignore
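
# Sketch of the row-pointer stitching above with two hypothetical ranks:
#
#     rank 0: ai = [0, 2, 5]     (2 rows, 5 nonzeros)
#     rank 1: ai = [0, 1, 3]     (2 rows, 3 nonzeros)
#     stitched: [0, 2, 5, 6, 8]  (rank 1 entries shifted by 5)
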
def gather_PETScVector(vector: PETSc.Vec, root=0) -> np.ndarray:  # type: ignore
    """
    Gather a distributed PETSc Vec as a numpy array. Note that `allgather`
    is used, so the full vector is returned on every process
    """
    if vector.handle == 0:
        raise RuntimeError("Vector has been destroyed prior to this call")
    numpy_vec = np.zeros(vector.size, dtype=vector.array.dtype)
    l_min = vector.owner_range[0]
    l_max = vector.owner_range[1]
    numpy_vec[l_min:l_max] += vector.array
    return np.asarray(sum(MPI.COMM_WORLD.allgather(numpy_vec)))
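
# Usage sketch (hypothetical Vec `b`, e.g. from dolfinx_mpc.assemble_vector):
#
#     b_np = gather_PETScVector(b)
#     assert b_np.size == b.size  # full global vector on every rank
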
def compare_CSR(A: scipy.sparse.csr_matrix, B: scipy.sparse.csr_matrix, atol=1e-10):
    """Assert that the CSR matrices A and B agree entrywise to within `atol`"""
    diff = np.abs(A - B)
    assert diff.max() < atol
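
# Example with two hypothetical sparse matrices; the perturbation is below the
# default tolerance, so the assertion passes:
#
#     I1 = scipy.sparse.eye(3, format="csr")
#     compare_CSR(I1, I1 + 1e-12 * I1)
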
def compare_mpc_lhs(
    A_org: PETSc.Mat,  # type: ignore
    A_mpc: PETSc.Mat,  # type: ignore
    mpc: dolfinx_mpc.MultiPointConstraint,
    root: int = 0,
    atol: np.floating[Any] = 5e3 * np.finfo(dolfinx.default_scalar_type).resolution,
):
    """
    Compare the unmodified matrix for the problem with the one assembled with a
    multi-point constraint.

    The unmodified matrix A is reduced to K^H A K, where K is the global
    transformation matrix and K^H its conjugate transpose, before being
    compared with the MPC matrix restricted to its non-slave rows and columns.
    """
    timer = dolfinx.common.Timer("~MPC: Compare matrices")
    comm = mpc.V.mesh.comm
    V = mpc.V
    assert root < comm.size
    is_complex = np.issubdtype(mpc.coefficients()[0].dtype, np.complexfloating)  # type: ignore
    scipy_dtype = np.complex128 if is_complex else np.float64
    K = gather_transformation_matrix(mpc, root=root)
    A_csr = gather_PETScMatrix(A_org, root=root)
    # Get global slaves
    glob_slaves = _gather_slaves_global(mpc)
    A_mpc_csr = gather_PETScMatrix(A_mpc, root=root)
    if MPI.COMM_WORLD.rank == root:
        K = K.astype(scipy_dtype)
        A_csr = A_csr.astype(scipy_dtype)
        KTAK = np.conj(K.T) * A_csr * K

        # Remove the identity rows and columns of the MPC matrix that belong
        # to slave dofs
        all_cols = np.arange(V.dofmap.index_map.size_global * V.dofmap.index_map_bs)

        cols_except_slaves = np.flatnonzero(np.isin(all_cols, glob_slaves, invert=True).astype(np.int32))
        mpc_without_slaves = A_mpc_csr[cols_except_slaves[:, None], cols_except_slaves]

        # Compute difference
        compare_CSR(KTAK, mpc_without_slaves, atol=atol)

    timer.stop()
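
# Typical usage in a test (hypothetical setup: `a` is a UFL bilinear form and
# `mpc` a finalized MultiPointConstraint):
#
#     import dolfinx.fem.petsc
#
#     A_org = dolfinx.fem.petsc.assemble_matrix(dolfinx.fem.form(a))
#     A_org.assemble()
#     A_mpc = dolfinx_mpc.assemble_matrix(dolfinx.fem.form(a), mpc)
#     A_mpc.assemble()
#     compare_mpc_lhs(A_org, A_mpc, mpc)
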
def compare_mpc_rhs(
    b_org: PETSc.Vec,  # type: ignore
    b: PETSc.Vec,  # type: ignore
    constraint: dolfinx_mpc.MultiPointConstraint,
    root: int = 0,
):
    """
    Compare an unconstrained RHS with an MPC RHS: the reduced vector K^H b_org
    must match the MPC vector on all non-slave dofs, and the MPC vector must be
    zero on slave dofs.
    """
    glob_slaves = _gather_slaves_global(constraint)
    b_org_np = gather_PETScVector(b_org, root=root)
    b_np = gather_PETScVector(b, root=root)
    K = gather_transformation_matrix(constraint, root=root)
    # constants = gather_constants(constraint)
    comm = constraint.V.mesh.comm
    if comm.rank == root:
        reduced_b = np.conj(K.T) @ b_org_np  # - constants for RHS mpc
        all_cols = np.arange(constraint.V.dofmap.index_map.size_global * constraint.V.dofmap.index_map_bs)
        cols_except_slaves = np.flatnonzero(np.isin(all_cols, glob_slaves, invert=True).astype(np.int32))
        assert np.allclose(b_np[glob_slaves], 0)
        assert np.allclose(b_np[cols_except_slaves], reduced_b)
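
# Typical usage in a test (hypothetical linear form `L` and a finalized `mpc`):
#
#     import dolfinx.fem.petsc
#
#     b_org = dolfinx.fem.petsc.assemble_vector(dolfinx.fem.form(L))
#     b_org.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE)
#     b = dolfinx_mpc.assemble_vector(dolfinx.fem.form(L), mpc)
#     b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE)
#     compare_mpc_rhs(b_org, b, mpc)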