Coverage for python/src/dolfinx_mpc/dictcondition.py: 98%

129 statements  

coverage.py v7.8.0, created at 2025-04-03 13:59 +0000

# Copyright (C) 2020-2021 Jørgen S. Dokken
#
# This file is part of DOLFINX_MPC
#
# SPDX-License-Identifier: MIT
from __future__ import annotations

import typing

import dolfinx
import dolfinx.fem as fem
import numpy as np
from dolfinx import default_scalar_type


def close_to(
    point: np.typing.NDArray[typing.Union[np.float64, np.float32]],
    atol=1000 * np.finfo(dolfinx.default_real_type).resolution,
):
    """
    Convenience function for locating a point [x,y,z]
    within an array x [[x0,...,xN],[y0,...,yN], [z0,...,zN]].

    Args:
        point: The point, which should be padded to 3D
        atol: Absolute tolerance passed to ``numpy.isclose``
    """
    return lambda x: np.isclose(x, point, atol=atol).all(axis=0)

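
# Illustrative usage sketch (added note, not part of the original module):
# `close_to` returns a marker for `dolfinx.fem.locate_dofs_geometrical`, which
# evaluates it on the dof coordinates padded to 3D. Given some function space
# `V`, the dofs (if any) sitting at the point (0.5, 0.5, 0.0) could be located
# with:
#
#     pt = np.array([[0.5], [0.5], [0.0]], dtype=dolfinx.default_real_type)
#     dofs = fem.locate_dofs_geometrical(V, close_to(pt))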

@typing.no_type_check
def create_dictionary_constraint(
    V: fem.functionspace,
    slave_master_dict: typing.Dict[bytes, typing.Dict[bytes, float]],
    subspace_slave: typing.Optional[int] = None,
    subspace_master: typing.Optional[int] = None,
):
    """
    Create the data for a multi-point constraint on a given function space,
    defined by a dictionary mapping slave points to their master points and
    coefficients.

    Args:
        V: The function space
        slave_master_dict: Dictionary mapping the byte representation of each slave
            point to a dictionary of master points (as bytes) and their coefficients.
        subspace_slave: If using a mixed or vector function space, the index of the
            sub-space in which to locate the slave degrees of freedom.
        subspace_master: Sub-space index for the master degrees of freedom
            (mixed or vector spaces).

    Returns:
        A tuple ``(slaves, masters, coeffs, owners, offsets)`` of arrays, where
        ``slaves`` holds local slave dof indices, ``masters`` global master dof
        indices, and the entries for the ``i``-th slave are stored in the slice
        ``offsets[i]:offsets[i + 1]`` of ``masters``, ``coeffs`` and ``owners``.

    Examples:
        If the dof `D` located at `[d0,d1]` should be constrained to the dofs `E` and
        `F` at `[e0,e1]` and `[f0,f1]` as :math:`D = \\alpha E + \\beta F`,
        the dictionary should be:

        .. highlight:: python
        .. code-block:: python

            {np.array([d0, d1], dtype=mesh.geometry.x.dtype).tobytes():
                {np.array([e0, e1], dtype=mesh.geometry.x.dtype).tobytes(): alpha,
                 np.array([f0, f1], dtype=mesh.geometry.x.dtype).tobytes(): beta}}
    """
    comm = V.mesh.comm
    bs = V.dofmap.index_map_bs
    local_size = V.dofmap.index_map.size_local * bs
    index_map = V.dofmap.index_map
    owned_entities = {}
    ghosted_entities = {}
    non_local_entities = {}
    slaves_local = {}
    slaves_ghost = {}
    dfloat = V.mesh.geometry.x.dtype
    slave_point_nd = np.zeros((3, 1), dtype=dfloat)
    for i, slave_point in enumerate(slave_master_dict.keys()):
        num_masters = len(list(slave_master_dict[slave_point].keys()))
        # Status for current slave, -1 if not on proc, 0 if ghost, 1 if owned
        slave_status = -1
        # Wrap slave point as numpy array
        sp = np.frombuffer(slave_point, dtype=dfloat)
        for j, coord in enumerate(sp):
            slave_point_nd[j] = coord
        slave_point_nd[len(sp) :] = 0

        if subspace_slave is None:
            slave_dofs = fem.locate_dofs_geometrical(V, close_to(slave_point_nd))
        else:
            Vsub = V.sub(subspace_slave).collapse()[0]
            slave_dofs = fem.locate_dofs_geometrical((V.sub(subspace_slave), Vsub), close_to(slave_point_nd))[0]
        if len(slave_dofs) == 1:
            # Decide if slave is ghost or not
            if slave_dofs[0] < local_size:
                slaves_local[i] = slave_dofs[0]
                owned_entities[i] = {
                    "masters": np.full(num_masters, -1, dtype=np.int64),
                    "coeffs": np.full(num_masters, -1, dtype=dolfinx.default_scalar_type),
                    "owners": np.full(num_masters, -1, dtype=np.int32),
                    "master_count": 0,
                    "local_index": [],
                }
                slave_status = 1
            else:
                slaves_ghost[i] = slave_dofs[0]
                ghosted_entities[i] = {
                    "masters": np.full(num_masters, -1, dtype=np.int64),
                    "coeffs": np.full(num_masters, -1, dtype=dolfinx.default_scalar_type),
                    "owners": np.full(num_masters, -1, dtype=np.int32),
                    "master_count": 0,
                    "local_index": [],
                }
                slave_status = 0
        elif len(slave_dofs) > 1:
            raise RuntimeError("Multiple slaves found at same point. You should use sub-space locators.")
        # Wrap as list to ensure order later
        master_points = list(slave_master_dict[slave_point].keys())
        master_points_nd = np.zeros((3, len(master_points)), dtype=dfloat)
        for j, master_point in enumerate(master_points):
            # Wrap bytes as numpy array
            for k, coord in enumerate(np.frombuffer(master_point, dtype=dfloat)):
                master_points_nd[k, j] = coord
            if subspace_master is None:
                master_dofs = fem.locate_dofs_geometrical(V, close_to(master_points_nd[:, j : j + 1]))
            else:
                Vsub = V.sub(subspace_master).collapse()[0]
                master_dofs = fem.locate_dofs_geometrical(
                    (V.sub(subspace_master), Vsub), close_to(master_points_nd[:, j : j + 1])
                )[0]

            # Only add masters owned by this processor
            master_dofs = master_dofs[master_dofs < local_size]
            if len(master_dofs) == 1:
                master_block = master_dofs[0] // bs
                master_rem = master_dofs[0] % bs
                glob_master = index_map.local_to_global(np.asarray([master_block], dtype=np.int32))[0]
                if slave_status == -1:
                    if i in non_local_entities.keys():
                        non_local_entities[i]["masters"].append(glob_master * bs + master_rem)
                        non_local_entities[i]["coeffs"].append(slave_master_dict[slave_point][master_point])
                        non_local_entities[i]["owners"].append(comm.rank)
                        non_local_entities[i]["local_index"].append(j)
                    else:
                        non_local_entities[i] = {
                            "masters": [glob_master * bs + master_rem],
                            "coeffs": [slave_master_dict[slave_point][master_point]],
                            "owners": [comm.rank],
                            "local_index": [j],
                        }
                elif slave_status == 0:
                    ghosted_entities[i]["masters"][j] = glob_master * bs + master_rem
                    ghosted_entities[i]["owners"][j] = comm.rank
                    ghosted_entities[i]["coeffs"][j] = slave_master_dict[slave_point][master_point]
                    ghosted_entities[i]["local_index"].append(j)
                elif slave_status == 1:
                    owned_entities[i]["masters"][j] = glob_master * bs + master_rem
                    owned_entities[i]["owners"][j] = comm.rank
                    owned_entities[i]["coeffs"][j] = slave_master_dict[slave_point][master_point]
                    owned_entities[i]["local_index"].append(j)
                else:
                    raise RuntimeError("Invalid slave status: {0:d} (-1,0,1 are valid options)".format(slave_status))
            elif len(master_dofs) > 1:
                raise RuntimeError("Multiple masters found at same point. You should use sub-space locators.")

    # Send the ghost and owned entities to processor 0 to gather them.
    # Index 0 holds owned slaves, 1 ghosted slaves, 2 master data for slaves not on this process.
    data_to_send = [owned_entities, ghosted_entities, non_local_entities]
    if comm.rank != 0:
        comm.send(data_to_send, dest=0, tag=1)
        del owned_entities, ghosted_entities, non_local_entities
    # Gather all info on proc 0 and sort data
    owned_slaves, ghosted_slaves = None, None
    if comm.rank == 0:
        recv = {0: data_to_send}
        for proc in range(1, comm.size):
            recv[proc] = comm.recv(source=proc, tag=1)

        for proc in range(comm.size):
            # Loop through all masters
            other_procs = np.arange(comm.size)
            other_procs = other_procs[other_procs != proc]
            # Loop through all owned slaves and ghosts, and update
            # the master entries
            for pair in [[0, 1], [1, 0]]:
                i, j = pair
                for slave in recv[proc][i].keys():
                    for o_proc in other_procs:
                        # If slave is ghost on other proc add local masters
                        if slave in recv[o_proc][j].keys():
                            # Update master with possible entries from ghost
                            o_masters = recv[o_proc][j][slave]["local_index"]
                            for o_master in o_masters:
                                recv[proc][i][slave]["masters"][o_master] = recv[o_proc][j][slave]["masters"][o_master]
                                recv[proc][i][slave]["coeffs"][o_master] = recv[o_proc][j][slave]["coeffs"][o_master]
                                recv[proc][i][slave]["owners"][o_master] = recv[o_proc][j][slave]["owners"][o_master]
                        # If proc only has master, but not the slave
                        if slave in recv[o_proc][2].keys():
                            o_masters = recv[o_proc][2][slave]["local_index"]
                            # As non owned indices only store non-zero entries
                            for k, o_master in enumerate(o_masters):
                                recv[proc][i][slave]["masters"][o_master] = recv[o_proc][2][slave]["masters"][k]
                                recv[proc][i][slave]["coeffs"][o_master] = recv[o_proc][2][slave]["coeffs"][k]
                                recv[proc][i][slave]["owners"][o_master] = recv[o_proc][2][slave]["owners"][k]
            if proc == comm.rank:
                owned_slaves = recv[proc][0]
                ghosted_slaves = recv[proc][1]
            else:
                # If no owned masters, do not send masters
                if len(recv[proc][0].keys()) > 0:
                    comm.send(recv[proc][0], dest=proc, tag=55)
                if len(recv[proc][1].keys()) > 0:
                    comm.send(recv[proc][1], dest=proc, tag=66)
    else:
        if len(slaves_local.keys()) > 0:
            owned_slaves = comm.recv(source=0, tag=55)
        if len(slaves_ghost.keys()) > 0:
            ghosted_slaves = comm.recv(source=0, tag=66)

    # Flatten slaves (local)
    slaves, masters, coeffs, owners, offsets = [], [], [], [], [0]
    for slave_index in slaves_local.keys():
        slaves.append(slaves_local[slave_index])
        masters.extend(owned_slaves[slave_index]["masters"])  # type: ignore
        owners.extend(owned_slaves[slave_index]["owners"])  # type: ignore
        coeffs.extend(owned_slaves[slave_index]["coeffs"])  # type: ignore
        offsets.append(len(masters))

    for slave_index in slaves_ghost.keys():
        slaves.append(slaves_ghost[slave_index])
        masters.extend(ghosted_slaves[slave_index]["masters"])  # type: ignore
        owners.extend(ghosted_slaves[slave_index]["owners"])  # type: ignore
        coeffs.extend(ghosted_slaves[slave_index]["coeffs"])  # type: ignore
        offsets.append(len(masters))
    return (
        np.asarray(slaves, dtype=np.int32),
        np.asarray(masters, dtype=np.int64),
        np.asarray(coeffs, dtype=default_scalar_type),
        np.asarray(owners, dtype=np.int32),
        np.asarray(offsets, dtype=np.int32),
    )
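

# Illustrative usage sketch (added for this listing, not part of dolfinx_mpc).
# Assuming a recent DOLFINx where `fem.functionspace` and
# `dolfinx.mesh.create_unit_square` are available, the constraint
# D(1, 1) = 0.5 * E(0, 0) on a P1 space could be built as follows; points are
# keyed by their byte representation, as in the docstring example above.
if __name__ == "__main__":
    from mpi4py import MPI

    from dolfinx.mesh import create_unit_square

    msh = create_unit_square(MPI.COMM_WORLD, 4, 4)
    V = fem.functionspace(msh, ("Lagrange", 1))
    dtype = msh.geometry.x.dtype
    slave_pt = np.array([1.0, 1.0], dtype=dtype).tobytes()
    master_pt = np.array([0.0, 0.0], dtype=dtype).tobytes()
    slaves, masters, coeffs, owners, offsets = create_dictionary_constraint(V, {slave_pt: {master_pt: 0.5}})
    print(slaves, masters, coeffs, owners, offsets)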