Coverage for python/src/dolfinx_mpc/utils/mpc_utils.py: 88% (266 statements)

# Copyright (C) 2020-2021 Jørgen S. Dokken
#
# This file is part of DOLFINX_MPC
#
# SPDX-License-Identifier: MIT
from __future__ import annotations

from mpi4py import MPI
from petsc4py import PETSc

import dolfinx.common as _common
import dolfinx.cpp as _cpp
import dolfinx.fem as _fem
import dolfinx.geometry as _geometry
import dolfinx.la as _la
import dolfinx.log as _log
import dolfinx.mesh as _mesh
import numpy as np
import ufl
from dolfinx import default_scalar_type as _dt

import dolfinx_mpc.cpp

__all__ = [
    "rotation_matrix",
    "facet_normal_approximation",
    "log_info",
    "rigid_motions_nullspace",
    "determine_closest_block",
    "create_normal_approximation",
    "create_point_to_point_constraint",
]


def rotation_matrix(axis, angle):
    # See https://en.wikipedia.org/wiki/Rotation_matrix,
    # Subsection: Rotation_matrix_from_axis_and_angle.
    if np.isclose(np.inner(axis, axis), 1):
        n_axis = axis
    else:
        # Normalize axis
        n_axis = axis / np.sqrt(np.inner(axis, axis))

    # Define cross product matrix of axis
    axis_x = np.array([[0, -n_axis[2], n_axis[1]], [n_axis[2], 0, -n_axis[0]], [-n_axis[1], n_axis[0], 0]])
    identity = np.cos(angle) * np.eye(3)
    outer = (1 - np.cos(angle)) * np.outer(n_axis, n_axis)
    return np.sin(angle) * axis_x + identity + outer
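

# Minimal usage sketch (illustration only; `_example_rotation_matrix` is not part of
# the module's API): rotating the x-axis by 90 degrees about the z-axis should give
# the y-axis, and the result should be orthogonal.
def _example_rotation_matrix():
    R = rotation_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
    assert np.allclose(R @ np.array([1.0, 0.0, 0.0]), [0.0, 1.0, 0.0])
    assert np.allclose(R @ R.T, np.eye(3))  # rotation matrices are orthogonal
    return R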


def facet_normal_approximation(
    V,
    mt: _mesh.MeshTags,
    mt_id: int,
    tangent=False,
    jit_options: dict = {},
    form_compiler_options: dict = {},
):
    """
    Approximate the facet normal by projecting it into the function space for a set of facets.

    Args:
        V: The function space to project into
        mt: The `dolfinx.mesh.MeshTags` object containing facet markers
        mt_id: The id for the facets in `mt` we want to represent the normal at
        tangent: Set this flag to `True` to approximate the tangent to the facets instead
        jit_options: Parameters used in CFFI JIT compilation of C code generated by FFCx.
            See https://github.com/FEniCS/dolfinx/blob/main/python/dolfinx/jit.py#L22-L37
            for all available parameters. Takes priority over all other parameter values.
        form_compiler_options: Parameters used in FFCx compilation of this form. Run `ffcx --help` at
            the command line to see all available options. Takes priority over all
            other parameter values, except for `scalar_type`, which is determined by
            DOLFINx.
    """
    timer = _common.Timer("~MPC: Facet normal projection")
    comm = V.mesh.comm
    n = ufl.FacetNormal(V.mesh)
    nh = _fem.Function(V)
    u, v = ufl.TrialFunction(V), ufl.TestFunction(V)
    ds = ufl.ds(domain=V.mesh, subdomain_data=mt, subdomain_id=mt_id)
    if tangent:
        if V.mesh.geometry.dim == 1:
            raise ValueError("Tangent not defined for 1D problem")
        elif V.mesh.geometry.dim == 2:
            a = ufl.inner(u, v) * ds
            L = ufl.inner(ufl.as_vector([-n[1], n[0]]), v) * ds
        else:

            def tangential_proj(u, n):
                """
                See for instance:
                https://link.springer.com/content/pdf/10.1023/A:1022235512626.pdf
                """
                return (ufl.Identity(u.ufl_shape[0]) - ufl.outer(n, n)) * u

            c = _fem.Constant(V.mesh, [1, 1, 1])
            a = ufl.inner(u, v) * ds
            L = ufl.inner(tangential_proj(c, n), v) * ds
    else:
        a = ufl.inner(u, v) * ds
        L = ufl.inner(n, v) * ds

    # Find all dofs that are not boundary dofs
    imap = V.dofmap.index_map
    all_blocks = np.arange(imap.size_local, dtype=np.int32)
    top_blocks = _fem.locate_dofs_topological(V, V.mesh.topology.dim - 1, mt.find(mt_id))
    deac_blocks = all_blocks[np.isin(all_blocks, top_blocks, invert=True)]

    # Note: there should be a better way to do this
    # Create sparsity pattern only for constraint + bc
    bilinear_form = _fem.form(a, jit_options=jit_options, form_compiler_options=form_compiler_options)
    pattern = _fem.create_sparsity_pattern(bilinear_form)
    pattern.insert_diagonal(deac_blocks)
    pattern.finalize()
    u_0 = _fem.Function(V)
    u_0.x.petsc_vec.set(0)

    bc_deac = _fem.dirichletbc(u_0, deac_blocks)
    A = _cpp.la.petsc.create_matrix(comm, pattern)
    A.zeroEntries()

    # Assemble the matrix with all entries
    form_coeffs = _cpp.fem.pack_coefficients(bilinear_form._cpp_object)
    form_consts = _cpp.fem.pack_constants(bilinear_form._cpp_object)
    _cpp.fem.petsc.assemble_matrix(A, bilinear_form._cpp_object, form_consts, form_coeffs, [bc_deac._cpp_object])
    if bilinear_form.function_spaces[0] is bilinear_form.function_spaces[1]:
        A.assemblyBegin(PETSc.Mat.AssemblyType.FLUSH)  # type: ignore
        A.assemblyEnd(PETSc.Mat.AssemblyType.FLUSH)  # type: ignore
        _cpp.fem.petsc.insert_diagonal(A, bilinear_form.function_spaces[0], [bc_deac._cpp_object], 1.0)
    A.assemble()
    linear_form = _fem.form(L, jit_options=jit_options, form_compiler_options=form_compiler_options)
    b = _fem.petsc.assemble_vector(linear_form)

    _fem.petsc.apply_lifting(b, [bilinear_form], [[bc_deac]])
    b.ghostUpdate(addv=PETSc.InsertMode.ADD_VALUES, mode=PETSc.ScatterMode.REVERSE)  # type: ignore
    _fem.petsc.set_bc(b, [bc_deac])

    # Solve linear problem
    solver = PETSc.KSP().create(V.mesh.comm)  # type: ignore
    solver.setType("cg")
    solver.rtol = 1e-8
    solver.setOperators(A)
    solver.solve(b, nh.x.petsc_vec)
    nh.x.petsc_vec.ghostUpdate(addv=PETSc.InsertMode.INSERT, mode=PETSc.ScatterMode.FORWARD)  # type: ignore
    timer.stop()
    solver.destroy()
    b.destroy()
    return nh
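

# Minimal usage sketch (illustration only; the mesh, the vector Lagrange space and the
# marker value 1 are arbitrary choices, and `_example_facet_normal_approximation` is
# not part of the module's API): approximate the outward normal, roughly (1, 0), on
# the right-hand boundary of a unit square.
def _example_facet_normal_approximation():
    mesh = _mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
    mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
    V = _fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,)))
    facets = _mesh.locate_entities_boundary(mesh, mesh.topology.dim - 1, lambda x: np.isclose(x[0], 1.0))
    mt = _mesh.meshtags(mesh, mesh.topology.dim - 1, facets, np.full_like(facets, 1))
    return facet_normal_approximation(V, mt, 1)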


def log_info(message):
    """
    Wrapper for logging a simple string on rank 0 of the world communicator,
    restoring the previous log level afterwards.
    """
    old_level = _log.get_log_level()
    if MPI.COMM_WORLD.rank == 0:
        _log.set_log_level(_log.LogLevel.INFO)
        _log.log(_log.LogLevel.INFO, message)
        _log.set_log_level(old_level)
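

# Minimal usage sketch (illustration only): the message is emitted on rank 0 only,
# and the global log level is left as it was.
def _example_log_info():
    log_info("Starting facet normal projection")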


def rigid_motions_nullspace(V: _fem.FunctionSpace):
    """
    Build the rigid-motion nullspace (translations and rotations) for 2D/3D elasticity.

    Args:
        V: The function space
    """
    _x = _fem.Function(V)
    # Get geometric dim
    gdim = V.mesh.geometry.dim
    assert gdim == 2 or gdim == 3

    # Set dimension of nullspace
    dim = 3 if gdim == 2 else 6

    # Create list of vectors for null space
    nullspace_basis = [
        _la.vector(V.dofmap.index_map, bs=V.dofmap.index_map_bs, dtype=PETSc.ScalarType)  # type: ignore
        for i in range(dim)
    ]

    basis = [b.array for b in nullspace_basis]
    dofs = [V.sub(i).dofmap.list.reshape(-1) for i in range(gdim)]

    # Build translational null space basis
    for i in range(gdim):
        basis[i][dofs[i]] = 1.0
    # Build rotational null space basis
    x = V.tabulate_dof_coordinates()
    dofs_block = V.dofmap.list.reshape(-1)
    x0, x1, x2 = x[dofs_block, 0], x[dofs_block, 1], x[dofs_block, 2]
    if gdim == 2:
        basis[2][dofs[0]] = -x1
        basis[2][dofs[1]] = x0
    elif gdim == 3:
        basis[3][dofs[0]] = -x1
        basis[3][dofs[1]] = x0

        basis[4][dofs[0]] = x2
        basis[4][dofs[2]] = -x0
        basis[5][dofs[2]] = x1
        basis[5][dofs[1]] = -x2
    for b in nullspace_basis:
        b.scatter_forward()

    _la.orthonormalize(nullspace_basis)
    assert _la.is_orthonormal(nullspace_basis, float(np.finfo(_x.x.array.dtype).eps))
    local_size = V.dofmap.index_map.size_local * V.dofmap.index_map_bs
    basis_petsc = [
        PETSc.Vec().createWithArray(x[:local_size], bsize=gdim, comm=V.mesh.comm)  # type: ignore
        for x in basis
    ]
    return PETSc.NullSpace().create(comm=V.mesh.comm, vectors=basis_petsc)  # type: ignore
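

# Minimal usage sketch (illustration only; the mesh and the vector Lagrange space are
# arbitrary choices, and `_example_rigid_motions_nullspace` is not part of the module's
# API): in 2D the nullspace has three modes (two translations and one rotation). The
# returned PETSc nullspace is typically attached to the assembled elasticity operator,
# e.g. via `A.setNearNullSpace(ns)`, to help algebraic multigrid.
def _example_rigid_motions_nullspace():
    mesh = _mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
    V = _fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,)))
    ns = rigid_motions_nullspace(V)
    assert len(ns.getVecs()) == 3  # two translations + one rotation in 2D
    return ns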


def determine_closest_block(V, point):
    """
    Determine the block of degrees of freedom closest to a point.

    Returns the rank owning the closest block and, on that rank, a list containing the
    local block index (an empty list on all other ranks).
    """
    # Create bounding box tree of cells connected to boundary facets
    tdim = V.mesh.topology.dim
    boundary_facets = _mesh.exterior_facet_indices(V.mesh.topology)
    V.mesh.topology.create_connectivity(tdim - 1, tdim)
    f_to_c = V.mesh.topology.connectivity(tdim - 1, tdim)
    boundary_cells = []
    for facet in boundary_facets:
        boundary_cells.extend(f_to_c.links(facet))
    cell_imap = V.mesh.topology.index_map(tdim)
    boundary_cells = np.array(np.unique(boundary_cells), dtype=np.int32)
    boundary_cells = boundary_cells[boundary_cells < cell_imap.size_local]
    bb_tree = _geometry.bb_tree(V.mesh, tdim, boundary_cells)
    midpoint_tree = _geometry.create_midpoint_tree(V.mesh, tdim, boundary_cells)

    # Find the cell closest to the point
    point = np.reshape(point, (1, 3)).astype(V.mesh.geometry.x.dtype)
    closest_cell = _geometry.compute_closest_entity(bb_tree, midpoint_tree, V.mesh, point)[0]

    # Set the distance high if the cell is not owned
    if cell_imap.size_local < closest_cell or closest_cell == -1:
        R = 1e5
    else:
        # Get cell geometry
        p = V.mesh.geometry.x
        V.mesh.topology.create_connectivity(tdim, tdim)
        entities = _mesh.entities_to_geometry(V.mesh, tdim, np.array([closest_cell], dtype=np.int32), False)
        R = np.linalg.norm(_geometry.compute_distance_gjk(point, p[entities[0]]))

    # Find the processor with the cell closest to the point
    global_distances = MPI.COMM_WORLD.allgather(R)
    owning_processor = np.argmin(global_distances)

    dofmap = V.dofmap
    imap = dofmap.index_map
    ghost_owner = imap.owners
    local_max = imap.size_local
    # Determine which block of dofs is closest
    min_distance = max(R, 1e5)
    minimal_distance_block = None
    min_dof_owner = owning_processor
    if MPI.COMM_WORLD.rank == owning_processor:
        x = V.tabulate_dof_coordinates()
        cell_blocks = dofmap.cell_dofs(closest_cell)
        for block in cell_blocks:
            distance = np.linalg.norm(_geometry.compute_distance_gjk(point, x[block]))
            if distance < min_distance:
                # The block may be owned locally or be a ghost; record its owner
                if block < local_max:
                    min_dof_owner = MPI.COMM_WORLD.rank
                else:
                    min_dof_owner = ghost_owner[block - local_max]
                minimal_distance_block = block
                min_distance = distance
    min_dof_owner = MPI.COMM_WORLD.bcast(min_dof_owner, root=owning_processor)
    # If the closest block is not owned by the processor owning the closest cell
    if owning_processor != min_dof_owner:
        owning_processor = min_dof_owner

    if MPI.COMM_WORLD.rank == min_dof_owner:
        # Re-search using the closest cell
        x = V.tabulate_dof_coordinates()
        cell_blocks = dofmap.cell_dofs(closest_cell)
        for block in cell_blocks:
            distance = np.linalg.norm(_geometry.compute_distance_gjk(point, x[block]))
            if distance < min_distance:
                # The block may be owned locally or be a ghost; record its owner
                if block < local_max:
                    min_dof_owner = MPI.COMM_WORLD.rank
                else:
                    min_dof_owner = ghost_owner[block - local_max]
                minimal_distance_block = block
                min_distance = distance
        assert min_dof_owner == owning_processor
        return owning_processor, [minimal_distance_block]
    else:
        return owning_processor, []
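

# Minimal usage sketch (illustration only; the mesh, the space and the corner point are
# arbitrary choices, and `_example_determine_closest_block` is not part of the module's
# API): find the dof block closest to the corner (1, 1) of a unit square.
def _example_determine_closest_block():
    mesh = _mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
    # Facet-to-cell connectivity is needed by exterior_facet_indices
    mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
    V = _fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,)))
    owner, block = determine_closest_block(V, np.array([1.0, 1.0, 0.0]))
    if MPI.COMM_WORLD.rank == owner:
        assert len(block) == 1  # the block index is only returned on the owning rank
    return owner, block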


def create_point_to_point_constraint(V, slave_point, master_point, vector=None):
    # Determine which processor owns the dof closest to the slave and master point
    slave_proc, slave_block = determine_closest_block(V, slave_point)
    master_proc, master_block = determine_closest_block(V, master_point)
    is_master_proc = MPI.COMM_WORLD.rank == master_proc
    is_slave_proc = MPI.COMM_WORLD.rank == slave_proc

    block_size = V.dofmap.index_map_bs
    imap = V.dofmap.index_map
    # Output structures
    slaves, masters, coeffs, owners, offsets = [], [], [], [], []
    # Information required to handle vector as input
    zero_indices, slave_index = None, None
    if vector is not None:
        zero_indices = np.argwhere(np.isclose(vector, 0)).T[0]
        slave_index = np.argmax(np.abs(vector))
    if is_slave_proc:
        assert len(slave_block) == 1
        slave_block_g = imap.local_to_global(np.asarray(slave_block, dtype=np.int32))[0]
        if vector is None:
            slaves = np.arange(
                slave_block[0] * block_size,
                slave_block[0] * block_size + block_size,
                dtype=np.int32,
            )
        else:
            assert len(vector) == block_size
            # The input vector should have the same length as the block size,
            # and the entry used as the slave must be nonzero
            assert not np.isin(slave_index, zero_indices)
            # Skip components with zero contribution
            slaves = np.array([slave_block[0] * block_size + slave_index], dtype=np.int32)
            for i in range(block_size):
                if i != slave_index and not np.isin(i, zero_indices):
                    masters.append(slave_block_g * block_size + i)
                    owners.append(slave_proc)
                    coeffs.append(-vector[i] / vector[slave_index])

    global_masters = None
    if is_master_proc:
        assert len(master_block) == 1
        master_block_g = imap.local_to_global(np.asarray(master_block, dtype=np.int32))[0]
        masters_as_glob = np.arange(
            master_block_g * block_size, master_block_g * block_size + block_size, dtype=np.int64
        )
    else:
        masters_as_glob = np.array([], dtype=np.int64)

    ghost_processors = []
    shared_indices = dolfinx_mpc.cpp.mpc.compute_shared_indices(V._cpp_object)

    if is_master_proc and is_slave_proc:
        # If slaves and masters are on the same processor, finalize the local work
        if vector is None:
            masters = masters_as_glob
            owners = np.full(len(masters), master_proc, dtype=np.int32)
            coeffs = np.ones(len(masters), dtype=_dt)
            offsets = np.arange(0, len(masters) + 1, dtype=np.int32)
        else:
            for i in range(len(masters_as_glob)):
                if not np.isin(i, zero_indices):
                    masters.append(masters_as_glob[i])
                    owners.append(master_proc)
                    coeffs.append(vector[i] / vector[slave_index])
            offsets = [0, len(masters)]
    else:
        # Send/receive masters from the other processor
        if is_master_proc:
            MPI.COMM_WORLD.send(masters_as_glob, dest=slave_proc, tag=10)
        if is_slave_proc:
            global_masters = MPI.COMM_WORLD.recv(source=master_proc, tag=10)
            for i, master in enumerate(global_masters):
                if not np.isin(i, zero_indices):
                    masters.append(master)
                    owners.append(master_proc)
                    if vector is None:
                        coeffs.append(1)
                    else:
                        coeffs.append(vector[i] / vector[slave_index])
            if vector is None:
                offsets = np.arange(0, len(slaves) + 1, dtype=np.int32)
            else:
                offsets = np.array([0, len(masters)], dtype=np.int32)
            ghost_processors = shared_indices.links(slave_block[0])

    # Broadcast the processors containing the slave
    ghost_processors = MPI.COMM_WORLD.bcast(ghost_processors, root=slave_proc)
    if is_slave_proc:
        for proc in ghost_processors:
            MPI.COMM_WORLD.send(slave_block_g * block_size + slaves % block_size, dest=proc, tag=20 + proc)
            MPI.COMM_WORLD.send(coeffs, dest=proc, tag=30 + proc)
            MPI.COMM_WORLD.send(owners, dest=proc, tag=40 + proc)
            MPI.COMM_WORLD.send(masters, dest=proc, tag=50 + proc)
            MPI.COMM_WORLD.send(offsets, dest=proc, tag=60 + proc)

    # Receive data for ghost slaves
    ghost_slaves, ghost_masters, ghost_coeffs, ghost_owners, ghost_offsets = [], [], [], [], []
    if np.isin(MPI.COMM_WORLD.rank, ghost_processors):
        # Convert received slaves to the corresponding ghost index
        recv_slaves = MPI.COMM_WORLD.recv(source=slave_proc, tag=20 + MPI.COMM_WORLD.rank)
        ghost_coeffs = MPI.COMM_WORLD.recv(source=slave_proc, tag=30 + MPI.COMM_WORLD.rank)
        ghost_owners = MPI.COMM_WORLD.recv(source=slave_proc, tag=40 + MPI.COMM_WORLD.rank)
        ghost_masters = MPI.COMM_WORLD.recv(source=slave_proc, tag=50 + MPI.COMM_WORLD.rank)
        ghost_offsets = MPI.COMM_WORLD.recv(source=slave_proc, tag=60 + MPI.COMM_WORLD.rank)

        # Unroll ghost blocks
        ghosts = imap.ghosts
        ghost_dofs = [g * block_size + i for g in ghosts for i in range(block_size)]

        ghost_slaves = np.zeros(len(recv_slaves), dtype=np.int32)
        local_size = imap.size_local
        for i, slave in enumerate(recv_slaves):
            idx = np.argwhere(ghost_dofs == slave)[0, 0]
            ghost_slaves[i] = local_size * block_size + idx
    slaves = np.asarray(np.append(slaves, ghost_slaves), dtype=np.int32)
    masters = np.asarray(np.append(masters, ghost_masters), dtype=np.int64)
    coeffs = np.asarray(np.append(coeffs, ghost_coeffs), dtype=_dt)
    owners = np.asarray(np.append(owners, ghost_owners), dtype=np.int32)
    offsets = np.asarray(np.append(offsets, ghost_offsets), dtype=np.int32)
    return slaves, masters, coeffs, owners, offsets
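

# Minimal usage sketch (illustration only; the mesh, the space and the two corner points
# are arbitrary choices, and `_example_create_point_to_point_constraint` is not part of
# the module's API): tie the dofs closest to (0, 0) to the dofs closest to (1, 1). The
# returned arrays are in the form accepted by dolfinx_mpc's MultiPointConstraint.
def _example_create_point_to_point_constraint():
    mesh = _mesh.create_unit_square(MPI.COMM_WORLD, 4, 4)
    # Facet-to-cell connectivity is needed by determine_closest_block
    mesh.topology.create_connectivity(mesh.topology.dim - 1, mesh.topology.dim)
    V = _fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,)))
    slaves, masters, coeffs, owners, offsets = create_point_to_point_constraint(
        V, np.array([0.0, 0.0, 0.0]), np.array([1.0, 1.0, 0.0])
    )
    return slaves, masters, coeffs, owners, offsets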


def create_normal_approximation(V: _fem.FunctionSpace, mt: _cpp.mesh.MeshTags_int32, value: int):
    """
    Create a normal approximation for the dofs in the closure of the attached entities.
    Where a dof is attached to several facets, an average is computed.

    Args:
        V: The function space
        mt: The meshtag containing the indices
        value: Value of the entities in the mesh tag to compute the normal on

    Returns:
        nh: The normal vector
    """
    nh = _fem.Function(V)
    n_cpp = dolfinx_mpc.cpp.mpc.create_normal_approximation(V._cpp_object, mt.dim, mt.find(value))
    nh._cpp_object = n_cpp
    return nh
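

# Minimal usage sketch (illustration only; the mesh, the space and the marker value 2
# are arbitrary choices, and `_example_create_normal_approximation` is not part of the
# module's API): averaged normals on the bottom boundary of a unit square.
def _example_create_normal_approximation():
    mesh = _mesh.create_unit_square(MPI.COMM_WORLD, 8, 8)
    V = _fem.functionspace(mesh, ("Lagrange", 1, (mesh.geometry.dim,)))
    facets = _mesh.locate_entities_boundary(mesh, mesh.topology.dim - 1, lambda x: np.isclose(x[1], 0.0))
    mt = _mesh.meshtags(mesh, mesh.topology.dim - 1, facets, np.full_like(facets, 2))
    return create_normal_approximation(V, mt, 2)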