1"""
2Interpolation inside triangular grids.
3"""
4
5import numpy as np
6
7from matplotlib import _api
8from matplotlib.tri import Triangulation
9from matplotlib.tri._trifinder import TriFinder
10from matplotlib.tri._tritools import TriAnalyzer
11
12__all__ = ('TriInterpolator', 'LinearTriInterpolator', 'CubicTriInterpolator')
13
14
15class TriInterpolator:
16 """
17 Abstract base class for classes used to interpolate on a triangular grid.
18
19 Derived classes implement the following methods:
20
21 - ``__call__(x, y)``,
22 where x, y are array-like point coordinates of the same shape, and
23 that returns a masked array of the same shape containing the
24 interpolated z-values.
25
26 - ``gradient(x, y)``,
27 where x, y are array-like point coordinates of the same
28 shape, and that returns a list of 2 masked arrays of the same shape
29 containing the 2 derivatives of the interpolator (derivatives of
30 interpolated z values with respect to x and y).
31 """
32
33 def __init__(self, triangulation, z, trifinder=None):
34 _api.check_isinstance(Triangulation, triangulation=triangulation)
35 self._triangulation = triangulation
36
37 self._z = np.asarray(z)
38 if self._z.shape != self._triangulation.x.shape:
39 raise ValueError("z array must have same length as triangulation x"
40 " and y arrays")
41
42 _api.check_isinstance((TriFinder, None), trifinder=trifinder)
43 self._trifinder = trifinder or self._triangulation.get_trifinder()
44
45 # Default scaling factors : 1.0 (= no scaling)
46 # Scaling may be used for interpolations for which the order of
47 # magnitude of x, y has an impact on the interpolant definition.
48 # Please refer to :meth:`_interpolate_multikeys` for details.
49 self._unit_x = 1.0
50 self._unit_y = 1.0
51
52 # Default triangle renumbering: None (= no renumbering)
53 # Renumbering may be used to avoid unnecessary computations
54 # if complex calculations are done inside the Interpolator.
55 # Please refer to :meth:`_interpolate_multikeys` for details.
56 self._tri_renum = None
57
58 # __call__ and gradient docstrings are shared by all subclasses
59 # (except, if needed, relevant additions).
60 # However these methods are only implemented in subclasses to avoid
61 # confusion in the documentation.
62 _docstring__call__ = """
63 Returns a masked array containing interpolated values at the specified
64 (x, y) points.
65
66 Parameters
67 ----------
68 x, y : array-like
69 x and y coordinates of the same shape and any number of
70 dimensions.
71
72 Returns
73 -------
74 np.ma.array
75 Masked array of the same shape as *x* and *y*; values corresponding
76 to (*x*, *y*) points outside of the triangulation are masked out.
77
78 """
79
80 _docstringgradient = r"""
81 Returns a list of 2 masked arrays containing interpolated derivatives
82 at the specified (x, y) points.
83
84 Parameters
85 ----------
86 x, y : array-like
87 x and y coordinates of the same shape and any number of
88 dimensions.
89
90 Returns
91 -------
92 dzdx, dzdy : np.ma.array
93 2 masked arrays of the same shape as *x* and *y*; values
94 corresponding to (x, y) points outside of the triangulation
95 are masked out.
96 The first returned array contains the values of
97 :math:`\frac{\partial z}{\partial x}` and the second those of
98 :math:`\frac{\partial z}{\partial y}`.
99
100 """
101
102 def _interpolate_multikeys(self, x, y, tri_index=None,
103 return_keys=('z',)):
104 """
105 Versatile (private) method defined for all TriInterpolators.
106
107 :meth:`_interpolate_multikeys` is a wrapper around method
108 :meth:`_interpolate_single_key` (to be defined in the child
109 subclasses).
        :meth:`_interpolate_single_key` actually performs the interpolation,
111 but only for 1-dimensional inputs and at valid locations (inside
112 unmasked triangles of the triangulation).
113
        The purpose of :meth:`_interpolate_multikeys` is to implement the
        following common tasks needed in all subclass implementations:
116
117 - calculation of containing triangles
118 - dealing with more than one interpolation request at the same
119 location (e.g., if the 2 derivatives are requested, it is
120 unnecessary to compute the containing triangles twice)
121 - scaling according to self._unit_x, self._unit_y
122 - dealing with points outside of the grid (with fill value np.nan)
123 - dealing with multi-dimensional *x*, *y* arrays: flattening for
          :meth:`_interpolate_single_key` call and final reshaping.
125
126 (Note that np.vectorize could do most of those things very well for
127 you, but it does it by function evaluations over successive tuples of
128 the input arrays. Therefore, this tends to be more time-consuming than
129 using optimized numpy functions - e.g., np.dot - which can be used
130 easily on the flattened inputs, in the child-subclass methods
131 :meth:`_interpolate_single_key`.)
132
133 It is guaranteed that the calls to :meth:`_interpolate_single_key`
134 will be done with flattened (1-d) array-like input parameters *x*, *y*
135 and with flattened, valid `tri_index` arrays (no -1 index allowed).
136
137 Parameters
138 ----------
139 x, y : array-like
140 x and y coordinates where interpolated values are requested.
141 tri_index : array-like of int, optional
142 Array of the containing triangle indices, same shape as
143 *x* and *y*. Defaults to None. If None, these indices
144 will be computed by a TriFinder instance.
            (Note: For points outside the grid, tri_index[ipt] shall be -1).
146 return_keys : tuple of keys from {'z', 'dzdx', 'dzdy'}
147 Defines the interpolation arrays to return, and in which order.
148
149 Returns
150 -------
        list of arrays
            Each array contains the expected interpolated values in the
            order defined by the *return_keys* parameter.
154 """
155 # Flattening and rescaling inputs arrays x, y
156 # (initial shape is stored for output)
157 x = np.asarray(x, dtype=np.float64)
158 y = np.asarray(y, dtype=np.float64)
159 sh_ret = x.shape
160 if x.shape != y.shape:
161 raise ValueError("x and y shall have same shapes."
162 f" Given: {x.shape} and {y.shape}")
163 x = np.ravel(x)
164 y = np.ravel(y)
165 x_scaled = x/self._unit_x
166 y_scaled = y/self._unit_y
167 size_ret = np.size(x_scaled)
168
169 # Computes & ravels the element indexes, extract the valid ones.
170 if tri_index is None:
171 tri_index = self._trifinder(x, y)
172 else:
173 if tri_index.shape != sh_ret:
174 raise ValueError(
175 "tri_index array is provided and shall"
176 " have same shape as x and y. Given: "
177 f"{tri_index.shape} and {sh_ret}")
178 tri_index = np.ravel(tri_index)
179
180 mask_in = (tri_index != -1)
181 if self._tri_renum is None:
182 valid_tri_index = tri_index[mask_in]
183 else:
184 valid_tri_index = self._tri_renum[tri_index[mask_in]]
185 valid_x = x_scaled[mask_in]
186 valid_y = y_scaled[mask_in]
187
188 ret = []
189 for return_key in return_keys:
190 # Find the return index associated with the key.
191 try:
192 return_index = {'z': 0, 'dzdx': 1, 'dzdy': 2}[return_key]
193 except KeyError as err:
194 raise ValueError("return_keys items shall take values in"
195 " {'z', 'dzdx', 'dzdy'}") from err
196
197 # Sets the scale factor for f & df components
198 scale = [1., 1./self._unit_x, 1./self._unit_y][return_index]
199
200 # Computes the interpolation
201 ret_loc = np.empty(size_ret, dtype=np.float64)
202 ret_loc[~mask_in] = np.nan
203 ret_loc[mask_in] = self._interpolate_single_key(
204 return_key, valid_tri_index, valid_x, valid_y) * scale
205 ret += [np.ma.masked_invalid(ret_loc.reshape(sh_ret), copy=False)]
206
207 return ret
208
209 def _interpolate_single_key(self, return_key, tri_index, x, y):
210 """
        Interpolate at points belonging to the triangulation
        (inside an unmasked triangle).
213
214 Parameters
215 ----------
216 return_key : {'z', 'dzdx', 'dzdy'}
217 The requested values (z or its derivatives).
218 tri_index : 1D int array
219 Valid triangle index (cannot be -1).
220 x, y : 1D arrays, same shape as `tri_index`
221 Valid locations where interpolation is requested.
222
223 Returns
224 -------
225 1-d array
226 Returned array of the same size as *tri_index*
227 """
        raise NotImplementedError("TriInterpolator subclasses "
                                  "should implement _interpolate_single_key!")
230
231
232class LinearTriInterpolator(TriInterpolator):
233 """
234 Linear interpolator on a triangular grid.
235
236 Each triangle is represented by a plane so that an interpolated value at
237 point (x, y) lies on the plane of the triangle containing (x, y).
238 Interpolated values are therefore continuous across the triangulation, but
239 their first derivatives are discontinuous at edges between triangles.
240
241 Parameters
242 ----------
243 triangulation : `~matplotlib.tri.Triangulation`
244 The triangulation to interpolate over.
245 z : (npoints,) array-like
246 Array of values, defined at grid points, to interpolate between.
247 trifinder : `~matplotlib.tri.TriFinder`, optional
248 If this is not specified, the Triangulation's default TriFinder will
249 be used by calling `.Triangulation.get_trifinder`.
250
251 Methods
252 -------
253 `__call__` (x, y) : Returns interpolated values at (x, y) points.
254 `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
255
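    Examples
    --------
    A minimal usage sketch (the node coordinates and values below are purely
    illustrative)::

        import numpy as np
        import matplotlib.tri as mtri

        x = np.array([0.0, 1.0, 0.0, 1.0])
        y = np.array([0.0, 0.0, 1.0, 1.0])
        z = x + 2.0 * y
        triang = mtri.Triangulation(x, y)
        interp = mtri.LinearTriInterpolator(triang, z)
        zi = interp(0.25, 0.25)                  # masked 0-d array
        dzdx, dzdy = interp.gradient(0.25, 0.25)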
256 """
257 def __init__(self, triangulation, z, trifinder=None):
258 super().__init__(triangulation, z, trifinder)
259
260 # Store plane coefficients for fast interpolation calculations.
261 self._plane_coefficients = \
262 self._triangulation.calculate_plane_coefficients(self._z)
263
264 def __call__(self, x, y):
265 return self._interpolate_multikeys(x, y, tri_index=None,
266 return_keys=('z',))[0]
267 __call__.__doc__ = TriInterpolator._docstring__call__
268
269 def gradient(self, x, y):
270 return self._interpolate_multikeys(x, y, tri_index=None,
271 return_keys=('dzdx', 'dzdy'))
272 gradient.__doc__ = TriInterpolator._docstringgradient
273
274 def _interpolate_single_key(self, return_key, tri_index, x, y):
275 _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)
276 if return_key == 'z':
277 return (self._plane_coefficients[tri_index, 0]*x +
278 self._plane_coefficients[tri_index, 1]*y +
279 self._plane_coefficients[tri_index, 2])
280 elif return_key == 'dzdx':
281 return self._plane_coefficients[tri_index, 0]
282 else: # 'dzdy'
283 return self._plane_coefficients[tri_index, 1]
284
285
286class CubicTriInterpolator(TriInterpolator):
287 r"""
288 Cubic interpolator on a triangular grid.
289
290 In one-dimension - on a segment - a cubic interpolating function is
291 defined by the values of the function and its derivative at both ends.
292 This is almost the same in 2D inside a triangle, except that the values
293 of the function and its 2 derivatives have to be defined at each triangle
294 node.
295
296 The CubicTriInterpolator takes the value of the function at each node -
297 provided by the user - and internally computes the value of the
298 derivatives, resulting in a smooth interpolation.
299 (As a special feature, the user can also impose the value of the
300 derivatives at each node, but this is not supposed to be the common
301 usage.)
302
303 Parameters
304 ----------
305 triangulation : `~matplotlib.tri.Triangulation`
306 The triangulation to interpolate over.
307 z : (npoints,) array-like
308 Array of values, defined at grid points, to interpolate between.
309 kind : {'min_E', 'geom', 'user'}, optional
310 Choice of the smoothing algorithm, in order to compute
311 the interpolant derivatives (defaults to 'min_E'):
312
        - if 'min_E': (default) The derivatives at each node are computed
          to minimize a bending energy.
        - if 'geom': The derivatives at each node are computed as a
          weighted average of relevant triangle normals. To be used for
          speed optimization (large grids).
        - if 'user': The user provides the argument *dz*, so no computation
          is needed.
320
321 trifinder : `~matplotlib.tri.TriFinder`, optional
322 If not specified, the Triangulation's default TriFinder will
323 be used by calling `.Triangulation.get_trifinder`.
324 dz : tuple of array-likes (dzdx, dzdy), optional
325 Used only if *kind* ='user'. In this case *dz* must be provided as
326 (dzdx, dzdy) where dzdx, dzdy are arrays of the same shape as *z* and
327 are the interpolant first derivatives at the *triangulation* points.
328
329 Methods
330 -------
331 `__call__` (x, y) : Returns interpolated values at (x, y) points.
332 `gradient` (x, y) : Returns interpolated derivatives at (x, y) points.
333
334 Notes
335 -----
336 This note is a bit technical and details how the cubic interpolation is
337 computed.
338
339 The interpolation is based on a Clough-Tocher subdivision scheme of
    the *triangulation* mesh (to make it clearer, each triangle of the
    grid will be divided into 3 child triangles, and on each child triangle
    the interpolated function is a cubic polynomial of the 2 coordinates).
343 This technique originates from FEM (Finite Element Method) analysis;
344 the element used is a reduced Hsieh-Clough-Tocher (HCT)
345 element. Its shape functions are described in [1]_.
346 The assembled function is guaranteed to be C1-smooth, i.e. it is
347 continuous and its first derivatives are also continuous (this
348 is easy to show inside the triangles but is also true when crossing the
349 edges).
350
351 In the default case (*kind* ='min_E'), the interpolant minimizes a
352 curvature energy on the functional space generated by the HCT element
353 shape functions - with imposed values but arbitrary derivatives at each
354 node. The minimized functional is the integral of the so-called total
355 curvature (implementation based on an algorithm from [2]_ - PCG sparse
356 solver):
357
358 .. math::
359
360 E(z) = \frac{1}{2} \int_{\Omega} \left(
361 \left( \frac{\partial^2{z}}{\partial{x}^2} \right)^2 +
362 \left( \frac{\partial^2{z}}{\partial{y}^2} \right)^2 +
363 2\left( \frac{\partial^2{z}}{\partial{y}\partial{x}} \right)^2
364 \right) dx\,dy
365
366 If the case *kind* ='geom' is chosen by the user, a simple geometric
367 approximation is used (weighted average of the triangle normal
368 vectors), which could improve speed on very large grids.
369
370 References
371 ----------
    .. [1] Michel Bernadou, Kamal Hassan, "Basis functions for general
        Hsieh-Clough-Tocher triangles, complete or reduced.",
        International Journal for Numerical Methods in Engineering,
        17(5):784-789, 1981.
376 .. [2] C.T. Kelley, "Iterative Methods for Optimization".
377
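    Examples
    --------
    A minimal usage sketch (the grid and values below are purely
    illustrative)::

        import numpy as np
        import matplotlib.tri as mtri

        xx, yy = np.meshgrid(np.linspace(0, 1, 5), np.linspace(0, 1, 5))
        x, y = xx.ravel(), yy.ravel()
        z = np.cos(2 * x) * np.sin(3 * y)
        triang = mtri.Triangulation(x, y)
        interp = mtri.CubicTriInterpolator(triang, z, kind='geom')
        zi = interp(0.4, 0.6)
        dzdx, dzdy = interp.gradient(0.4, 0.6)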
378 """
379 def __init__(self, triangulation, z, kind='min_E', trifinder=None,
380 dz=None):
381 super().__init__(triangulation, z, trifinder)
382
383 # Loads the underlying c++ _triangulation.
384 # (During loading, reordering of triangulation._triangles may occur so
385 # that all final triangles are now anti-clockwise)
386 self._triangulation.get_cpp_triangulation()
387
388 # To build the stiffness matrix and avoid zero-energy spurious modes
389 # we will only store internally the valid (unmasked) triangles and
390 # the necessary (used) points coordinates.
391 # 2 renumbering tables need to be computed and stored:
392 # - a triangle renum table in order to translate the result from a
393 # TriFinder instance into the internal stored triangle number.
394 # - a node renum table to overwrite the self._z values into the new
395 # (used) node numbering.
396 tri_analyzer = TriAnalyzer(self._triangulation)
397 (compressed_triangles, compressed_x, compressed_y, tri_renum,
398 node_renum) = tri_analyzer._get_compressed_triangulation()
399 self._triangles = compressed_triangles
400 self._tri_renum = tri_renum
401 # Taking into account the node renumbering in self._z:
402 valid_node = (node_renum != -1)
403 self._z[node_renum[valid_node]] = self._z[valid_node]
404
405 # Computing scale factors
406 self._unit_x = np.ptp(compressed_x)
407 self._unit_y = np.ptp(compressed_y)
408 self._pts = np.column_stack([compressed_x / self._unit_x,
409 compressed_y / self._unit_y])
410 # Computing triangle points
411 self._tris_pts = self._pts[self._triangles]
412 # Computing eccentricities
413 self._eccs = self._compute_tri_eccentricities(self._tris_pts)
414 # Computing dof estimations for HCT triangle shape function
415 _api.check_in_list(['user', 'geom', 'min_E'], kind=kind)
416 self._dof = self._compute_dof(kind, dz=dz)
417 # Loading HCT element
418 self._ReferenceElement = _ReducedHCT_Element()
419
420 def __call__(self, x, y):
421 return self._interpolate_multikeys(x, y, tri_index=None,
422 return_keys=('z',))[0]
423 __call__.__doc__ = TriInterpolator._docstring__call__
424
425 def gradient(self, x, y):
426 return self._interpolate_multikeys(x, y, tri_index=None,
427 return_keys=('dzdx', 'dzdy'))
428 gradient.__doc__ = TriInterpolator._docstringgradient
429
430 def _interpolate_single_key(self, return_key, tri_index, x, y):
431 _api.check_in_list(['z', 'dzdx', 'dzdy'], return_key=return_key)
432 tris_pts = self._tris_pts[tri_index]
433 alpha = self._get_alpha_vec(x, y, tris_pts)
434 ecc = self._eccs[tri_index]
435 dof = np.expand_dims(self._dof[tri_index], axis=1)
436 if return_key == 'z':
437 return self._ReferenceElement.get_function_values(
438 alpha, ecc, dof)
439 else: # 'dzdx', 'dzdy'
440 J = self._get_jacobian(tris_pts)
441 dzdx = self._ReferenceElement.get_function_derivatives(
442 alpha, J, ecc, dof)
443 if return_key == 'dzdx':
444 return dzdx[:, 0, 0]
445 else:
446 return dzdx[:, 1, 0]
447
448 def _compute_dof(self, kind, dz=None):
449 """
450 Compute and return nodal dofs according to kind.
451
452 Parameters
453 ----------
454 kind : {'min_E', 'geom', 'user'}
455 Choice of the _DOF_estimator subclass to estimate the gradient.
456 dz : tuple of array-likes (dzdx, dzdy), optional
457 Used only if *kind*=user; in this case passed to the
458 :class:`_DOF_estimator_user`.
459
460 Returns
461 -------
462 array-like, shape (npts, 2)
            Estimation of the gradient at triangulation nodes (stored as
            degrees of freedom of reduced-HCT triangle elements).
465 """
466 if kind == 'user':
467 if dz is None:
468 raise ValueError("For a CubicTriInterpolator with "
469 "*kind*='user', a valid *dz* "
470 "argument is expected.")
471 TE = _DOF_estimator_user(self, dz=dz)
472 elif kind == 'geom':
473 TE = _DOF_estimator_geom(self)
474 else: # 'min_E', checked in __init__
475 TE = _DOF_estimator_min_E(self)
476 return TE.compute_dof_from_df()
477
478 @staticmethod
479 def _get_alpha_vec(x, y, tris_pts):
480 """
481 Fast (vectorized) function to compute barycentric coordinates alpha.
482
483 Parameters
484 ----------
        x, y : array-like of dim 1 (shape (nx,))
            Coordinates of the points whose barycentric coordinates are
            requested.
488 tris_pts : array like of dim 3 (shape: (nx, 3, 2))
489 Coordinates of the containing triangles apexes.
490
491 Returns
492 -------
493 array of dim 2 (shape (nx, 3))
494 Barycentric coordinates of the points inside the containing
495 triangles.
496 """
497 ndim = tris_pts.ndim-2
498
499 a = tris_pts[:, 1, :] - tris_pts[:, 0, :]
500 b = tris_pts[:, 2, :] - tris_pts[:, 0, :]
501 abT = np.stack([a, b], axis=-1)
502 ab = _transpose_vectorized(abT)
503 OM = np.stack([x, y], axis=1) - tris_pts[:, 0, :]
504
505 metric = ab @ abT
506 # Here we try to deal with the colinear cases.
507 # metric_inv is in this case set to the Moore-Penrose pseudo-inverse
508 # meaning that we will still return a set of valid barycentric
509 # coordinates.
510 metric_inv = _pseudo_inv22sym_vectorized(metric)
511 Covar = ab @ _transpose_vectorized(np.expand_dims(OM, ndim))
512 ksi = metric_inv @ Covar
513 alpha = _to_matrix_vectorized([
514 [1-ksi[:, 0, 0]-ksi[:, 1, 0]], [ksi[:, 0, 0]], [ksi[:, 1, 0]]])
515 return alpha
516
517 @staticmethod
518 def _get_jacobian(tris_pts):
519 """
520 Fast (vectorized) function to compute triangle jacobian matrix.
521
522 Parameters
523 ----------
524 tris_pts : array like of dim 3 (shape: (nx, 3, 2))
525 Coordinates of the containing triangles apexes.
526
527 Returns
528 -------
        array of dim 3 (shape (nx, 2, 2))
            Jacobian matrices of the containing triangles.
532 J[itri, :, :] is the jacobian matrix at apex 0 of the triangle
533 itri, so that the following (matrix) relationship holds:
534 [dz/dksi] = [J] x [dz/dx]
535 with x: global coordinates
536 ksi: element parametric coordinates in triangle first apex
537 local basis.
538 """
539 a = np.array(tris_pts[:, 1, :] - tris_pts[:, 0, :])
540 b = np.array(tris_pts[:, 2, :] - tris_pts[:, 0, :])
541 J = _to_matrix_vectorized([[a[:, 0], a[:, 1]],
542 [b[:, 0], b[:, 1]]])
543 return J
544
545 @staticmethod
546 def _compute_tri_eccentricities(tris_pts):
547 """
548 Compute triangle eccentricities.
549
550 Parameters
551 ----------
552 tris_pts : array like of dim 3 (shape: (nx, 3, 2))
553 Coordinates of the triangles apexes.
554
555 Returns
556 -------
557 array like of dim 2 (shape: (nx, 3))
558 The so-called eccentricity parameters [1] needed for HCT triangular
559 element.
560 """
561 a = np.expand_dims(tris_pts[:, 2, :] - tris_pts[:, 1, :], axis=2)
562 b = np.expand_dims(tris_pts[:, 0, :] - tris_pts[:, 2, :], axis=2)
563 c = np.expand_dims(tris_pts[:, 1, :] - tris_pts[:, 0, :], axis=2)
564 # Do not use np.squeeze, this is dangerous if only one triangle
565 # in the triangulation...
566 dot_a = (_transpose_vectorized(a) @ a)[:, 0, 0]
567 dot_b = (_transpose_vectorized(b) @ b)[:, 0, 0]
568 dot_c = (_transpose_vectorized(c) @ c)[:, 0, 0]
569 # Note that this line will raise a warning for dot_a, dot_b or dot_c
570 # zeros, but we choose not to support triangles with duplicate points.
571 return _to_matrix_vectorized([[(dot_c-dot_b) / dot_a],
572 [(dot_a-dot_c) / dot_b],
573 [(dot_b-dot_a) / dot_c]])
574
575
576# FEM element used for interpolation and for solving minimisation
577# problem (Reduced HCT element)
578class _ReducedHCT_Element:
579 """
580 Implementation of reduced HCT triangular element with explicit shape
581 functions.
582
    Computes z, dz, d2z and the element stiffness matrix for bending energy:
    E(f) = integral( (d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA)
585
    *** Reference for the shape functions: ***
    [1] Basis functions for general Hsieh-Clough-Tocher triangles, complete or
        reduced.
        Michel Bernadou, Kamal Hassan
        International Journal for Numerical Methods in Engineering.
        17(5):784-789, 1981
592
593 *** Element description: ***
594 9 dofs: z and dz given at 3 apex
595 C1 (conform)
596
597 """
    # 1) Loads matrices to generate shape functions as a function of
    # triangle eccentricities - based on [1] p.11
600 M = np.array([
601 [ 0.00, 0.00, 0.00, 4.50, 4.50, 0.00, 0.00, 0.00, 0.00, 0.00],
602 [-0.25, 0.00, 0.00, 0.50, 1.25, 0.00, 0.00, 0.00, 0.00, 0.00],
603 [-0.25, 0.00, 0.00, 1.25, 0.50, 0.00, 0.00, 0.00, 0.00, 0.00],
604 [ 0.50, 1.00, 0.00, -1.50, 0.00, 3.00, 3.00, 0.00, 0.00, 3.00],
605 [ 0.00, 0.00, 0.00, -0.25, 0.25, 0.00, 1.00, 0.00, 0.00, 0.50],
606 [ 0.25, 0.00, 0.00, -0.50, -0.25, 1.00, 0.00, 0.00, 0.00, 1.00],
607 [ 0.50, 0.00, 1.00, 0.00, -1.50, 0.00, 0.00, 3.00, 3.00, 3.00],
608 [ 0.25, 0.00, 0.00, -0.25, -0.50, 0.00, 0.00, 0.00, 1.00, 1.00],
609 [ 0.00, 0.00, 0.00, 0.25, -0.25, 0.00, 0.00, 1.00, 0.00, 0.50]])
610 M0 = np.array([
611 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
612 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
613 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
614 [-1.00, 0.00, 0.00, 1.50, 1.50, 0.00, 0.00, 0.00, 0.00, -3.00],
615 [-0.50, 0.00, 0.00, 0.75, 0.75, 0.00, 0.00, 0.00, 0.00, -1.50],
616 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
617 [ 1.00, 0.00, 0.00, -1.50, -1.50, 0.00, 0.00, 0.00, 0.00, 3.00],
618 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
619 [ 0.50, 0.00, 0.00, -0.75, -0.75, 0.00, 0.00, 0.00, 0.00, 1.50]])
620 M1 = np.array([
621 [-0.50, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
622 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
623 [-0.25, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
624 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
625 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
626 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
627 [ 0.50, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
628 [ 0.25, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
629 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
630 M2 = np.array([
631 [ 0.50, 0.00, 0.00, 0.00, -1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
632 [ 0.25, 0.00, 0.00, 0.00, -0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
633 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
634 [-0.50, 0.00, 0.00, 0.00, 1.50, 0.00, 0.00, 0.00, 0.00, 0.00],
635 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
636 [-0.25, 0.00, 0.00, 0.00, 0.75, 0.00, 0.00, 0.00, 0.00, 0.00],
637 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
638 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00],
639 [ 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00, 0.00]])
640
641 # 2) Loads matrices to rotate components of gradient & Hessian
642 # vectors in the reference basis of triangle first apex (a0)
643 rotate_dV = np.array([[ 1., 0.], [ 0., 1.],
644 [ 0., 1.], [-1., -1.],
645 [-1., -1.], [ 1., 0.]])
646
647 rotate_d2V = np.array([[1., 0., 0.], [0., 1., 0.], [ 0., 0., 1.],
648 [0., 1., 0.], [1., 1., 1.], [ 0., -2., -1.],
649 [1., 1., 1.], [1., 0., 0.], [-2., 0., -1.]])
650
    # 3) Loads Gauss points & weights on the 3 sub-triangles for P2
    # exact integral - 3 points on each sub-triangle.
    # NOTE: as the 2nd derivative is discontinuous, we really need those 9
    # points!
655 n_gauss = 9
656 gauss_pts = np.array([[13./18., 4./18., 1./18.],
657 [ 4./18., 13./18., 1./18.],
658 [ 7./18., 7./18., 4./18.],
659 [ 1./18., 13./18., 4./18.],
660 [ 1./18., 4./18., 13./18.],
661 [ 4./18., 7./18., 7./18.],
662 [ 4./18., 1./18., 13./18.],
663 [13./18., 1./18., 4./18.],
664 [ 7./18., 4./18., 7./18.]], dtype=np.float64)
665 gauss_w = np.ones([9], dtype=np.float64) / 9.
666
667 # 4) Stiffness matrix for curvature energy
668 E = np.array([[1., 0., 0.], [0., 1., 0.], [0., 0., 2.]])
669
670 # 5) Loads the matrix to compute DOF_rot from tri_J at apex 0
671 J0_to_J1 = np.array([[-1., 1.], [-1., 0.]])
672 J0_to_J2 = np.array([[ 0., -1.], [ 1., -1.]])
673
674 def get_function_values(self, alpha, ecc, dofs):
675 """
676 Parameters
677 ----------
678 alpha : is a (N x 3 x 1) array (array of column-matrices) of
679 barycentric coordinates,
680 ecc : is a (N x 3 x 1) array (array of column-matrices) of triangle
681 eccentricities,
682 dofs : is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
683 degrees of freedom.
684
685 Returns
686 -------
687 Returns the N-array of interpolated function values.
688 """
689 subtri = np.argmin(alpha, axis=1)[:, 0]
690 ksi = _roll_vectorized(alpha, -subtri, axis=0)
691 E = _roll_vectorized(ecc, -subtri, axis=0)
692 x = ksi[:, 0, 0]
693 y = ksi[:, 1, 0]
694 z = ksi[:, 2, 0]
695 x_sq = x*x
696 y_sq = y*y
697 z_sq = z*z
698 V = _to_matrix_vectorized([
699 [x_sq*x], [y_sq*y], [z_sq*z], [x_sq*z], [x_sq*y], [y_sq*x],
700 [y_sq*z], [z_sq*y], [z_sq*x], [x*y*z]])
701 prod = self.M @ V
702 prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ V)
703 prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ V)
704 prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ V)
705 s = _roll_vectorized(prod, 3*subtri, axis=0)
706 return (dofs @ s)[:, 0, 0]
707
708 def get_function_derivatives(self, alpha, J, ecc, dofs):
709 """
710 Parameters
711 ----------
712 *alpha* is a (N x 3 x 1) array (array of column-matrices of
713 barycentric coordinates)
714 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
715 triangle first apex)
716 *ecc* is a (N x 3 x 1) array (array of column-matrices of triangle
717 eccentricities)
718 *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
719 degrees of freedom.
720
721 Returns
722 -------
        Returns the values of interpolated function derivatives [dz/dx, dz/dy]
        in global coordinates at locations alpha, as column-matrices of
        shape (N x 2 x 1).
726 """
727 subtri = np.argmin(alpha, axis=1)[:, 0]
728 ksi = _roll_vectorized(alpha, -subtri, axis=0)
729 E = _roll_vectorized(ecc, -subtri, axis=0)
730 x = ksi[:, 0, 0]
731 y = ksi[:, 1, 0]
732 z = ksi[:, 2, 0]
733 x_sq = x*x
734 y_sq = y*y
735 z_sq = z*z
736 dV = _to_matrix_vectorized([
737 [ -3.*x_sq, -3.*x_sq],
738 [ 3.*y_sq, 0.],
739 [ 0., 3.*z_sq],
740 [ -2.*x*z, -2.*x*z+x_sq],
741 [-2.*x*y+x_sq, -2.*x*y],
742 [ 2.*x*y-y_sq, -y_sq],
743 [ 2.*y*z, y_sq],
744 [ z_sq, 2.*y*z],
745 [ -z_sq, 2.*x*z-z_sq],
746 [ x*z-y*z, x*y-y*z]])
747 # Puts back dV in first apex basis
748 dV = dV @ _extract_submatrices(
749 self.rotate_dV, subtri, block_size=2, axis=0)
750
751 prod = self.M @ dV
752 prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ dV)
753 prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ dV)
754 prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ dV)
755 dsdksi = _roll_vectorized(prod, 3*subtri, axis=0)
756 dfdksi = dofs @ dsdksi
757 # In global coordinates:
758 # Here we try to deal with the simplest colinear cases, returning a
759 # null matrix.
760 J_inv = _safe_inv22_vectorized(J)
761 dfdx = J_inv @ _transpose_vectorized(dfdksi)
762 return dfdx
763
764 def get_function_hessians(self, alpha, J, ecc, dofs):
765 """
766 Parameters
767 ----------
768 *alpha* is a (N x 3 x 1) array (array of column-matrices) of
769 barycentric coordinates
770 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
771 triangle first apex)
772 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
773 eccentricities
774 *dofs* is a (N x 1 x 9) arrays (arrays of row-matrices) of computed
775 degrees of freedom.
776
777 Returns
778 -------
        Returns the values of interpolated function 2nd-derivatives
        [d2z/dx2, d2z/dy2, d2z/dxdy] in global coordinates at locations alpha,
        as column-matrices of shape (N x 3 x 1).
782 """
783 d2sdksi2 = self.get_d2Sidksij2(alpha, ecc)
784 d2fdksi2 = dofs @ d2sdksi2
785 H_rot = self.get_Hrot_from_J(J)
786 d2fdx2 = d2fdksi2 @ H_rot
787 return _transpose_vectorized(d2fdx2)
788
789 def get_d2Sidksij2(self, alpha, ecc):
790 """
791 Parameters
792 ----------
793 *alpha* is a (N x 3 x 1) array (array of column-matrices) of
794 barycentric coordinates
795 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
796 eccentricities
797
798 Returns
799 -------
        Returns the arrays d2sdksi2 (N x 9 x 3) Hessians of shape functions
        expressed in covariant coordinates in first apex basis.
802 """
803 subtri = np.argmin(alpha, axis=1)[:, 0]
804 ksi = _roll_vectorized(alpha, -subtri, axis=0)
805 E = _roll_vectorized(ecc, -subtri, axis=0)
806 x = ksi[:, 0, 0]
807 y = ksi[:, 1, 0]
808 z = ksi[:, 2, 0]
809 d2V = _to_matrix_vectorized([
810 [ 6.*x, 6.*x, 6.*x],
811 [ 6.*y, 0., 0.],
812 [ 0., 6.*z, 0.],
813 [ 2.*z, 2.*z-4.*x, 2.*z-2.*x],
814 [2.*y-4.*x, 2.*y, 2.*y-2.*x],
815 [2.*x-4.*y, 0., -2.*y],
816 [ 2.*z, 0., 2.*y],
817 [ 0., 2.*y, 2.*z],
818 [ 0., 2.*x-4.*z, -2.*z],
819 [ -2.*z, -2.*y, x-y-z]])
820 # Puts back d2V in first apex basis
821 d2V = d2V @ _extract_submatrices(
822 self.rotate_d2V, subtri, block_size=3, axis=0)
823 prod = self.M @ d2V
824 prod += _scalar_vectorized(E[:, 0, 0], self.M0 @ d2V)
825 prod += _scalar_vectorized(E[:, 1, 0], self.M1 @ d2V)
826 prod += _scalar_vectorized(E[:, 2, 0], self.M2 @ d2V)
827 d2sdksi2 = _roll_vectorized(prod, 3*subtri, axis=0)
828 return d2sdksi2
829
830 def get_bending_matrices(self, J, ecc):
831 """
832 Parameters
833 ----------
834 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
835 triangle first apex)
836 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
837 eccentricities
838
839 Returns
840 -------
841 Returns the element K matrices for bending energy expressed in
842 GLOBAL nodal coordinates.
843 K_ij = integral [ (d2zi/dx2 + d2zi/dy2) * (d2zj/dx2 + d2zj/dy2) dA]
844 tri_J is needed to rotate dofs from local basis to global basis
845 """
846 n = np.size(ecc, 0)
847
848 # 1) matrix to rotate dofs in global coordinates
849 J1 = self.J0_to_J1 @ J
850 J2 = self.J0_to_J2 @ J
851 DOF_rot = np.zeros([n, 9, 9], dtype=np.float64)
852 DOF_rot[:, 0, 0] = 1
853 DOF_rot[:, 3, 3] = 1
854 DOF_rot[:, 6, 6] = 1
855 DOF_rot[:, 1:3, 1:3] = J
856 DOF_rot[:, 4:6, 4:6] = J1
857 DOF_rot[:, 7:9, 7:9] = J2
858
859 # 2) matrix to rotate Hessian in global coordinates.
860 H_rot, area = self.get_Hrot_from_J(J, return_area=True)
861
862 # 3) Computes stiffness matrix
863 # Gauss quadrature.
864 K = np.zeros([n, 9, 9], dtype=np.float64)
865 weights = self.gauss_w
866 pts = self.gauss_pts
867 for igauss in range(self.n_gauss):
868 alpha = np.tile(pts[igauss, :], n).reshape(n, 3)
869 alpha = np.expand_dims(alpha, 2)
870 weight = weights[igauss]
871 d2Skdksi2 = self.get_d2Sidksij2(alpha, ecc)
872 d2Skdx2 = d2Skdksi2 @ H_rot
873 K += weight * (d2Skdx2 @ self.E @ _transpose_vectorized(d2Skdx2))
874
875 # 4) With nodal (not elem) dofs
876 K = _transpose_vectorized(DOF_rot) @ K @ DOF_rot
877
878 # 5) Need the area to compute total element energy
879 return _scalar_vectorized(area, K)
880
881 def get_Hrot_from_J(self, J, return_area=False):
882 """
883 Parameters
884 ----------
885 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
886 triangle first apex)
887
888 Returns
889 -------
        Returns H_rot used to rotate Hessian from local basis of first apex
        to global coordinates.
        If *return_area* is True, also returns the triangle area (0.5*det(J)).
893 """
894 # Here we try to deal with the simplest colinear cases; a null
895 # energy and area is imposed.
896 J_inv = _safe_inv22_vectorized(J)
897 Ji00 = J_inv[:, 0, 0]
898 Ji11 = J_inv[:, 1, 1]
899 Ji10 = J_inv[:, 1, 0]
900 Ji01 = J_inv[:, 0, 1]
901 H_rot = _to_matrix_vectorized([
902 [Ji00*Ji00, Ji10*Ji10, Ji00*Ji10],
903 [Ji01*Ji01, Ji11*Ji11, Ji01*Ji11],
904 [2*Ji00*Ji01, 2*Ji11*Ji10, Ji00*Ji11+Ji10*Ji01]])
905 if not return_area:
906 return H_rot
907 else:
908 area = 0.5 * (J[:, 0, 0]*J[:, 1, 1] - J[:, 0, 1]*J[:, 1, 0])
909 return H_rot, area
910
911 def get_Kff_and_Ff(self, J, ecc, triangles, Uc):
912 """
913 Build K and F for the following elliptic formulation:
914 minimization of curvature energy with value of function at node
915 imposed and derivatives 'free'.
916
        Build the global Kff matrix in coo format.
918 Build the full Ff vec Ff = - Kfc x Uc.
919
920 Parameters
921 ----------
922 *J* is a (N x 2 x 2) array of jacobian matrices (jacobian matrix at
923 triangle first apex)
924 *ecc* is a (N x 3 x 1) array (array of column-matrices) of triangle
925 eccentricities
        *triangles* is a (N x 3) array of node indices.
        *Uc* is a (N x 3) array of imposed displacements at nodes.
928
929 Returns
930 -------
931 (Kff_rows, Kff_cols, Kff_vals) Kff matrix in coo format - Duplicate
932 (row, col) entries must be summed.
        Ff: force vector - dim npts * 2
934 """
935 ntri = np.size(ecc, 0)
936 vec_range = np.arange(ntri, dtype=np.int32)
937 c_indices = np.full(ntri, -1, dtype=np.int32) # for unused dofs, -1
938 f_dof = [1, 2, 4, 5, 7, 8]
939 c_dof = [0, 3, 6]
940
941 # vals, rows and cols indices in global dof numbering
942 f_dof_indices = _to_matrix_vectorized([[
943 c_indices, triangles[:, 0]*2, triangles[:, 0]*2+1,
944 c_indices, triangles[:, 1]*2, triangles[:, 1]*2+1,
945 c_indices, triangles[:, 2]*2, triangles[:, 2]*2+1]])
946
947 expand_indices = np.ones([ntri, 9, 1], dtype=np.int32)
948 f_row_indices = _transpose_vectorized(expand_indices @ f_dof_indices)
949 f_col_indices = expand_indices @ f_dof_indices
950 K_elem = self.get_bending_matrices(J, ecc)
951
952 # Extracting sub-matrices
953 # Explanation & notations:
        # * Subscript f denotes 'free' degrees of freedom (i.e. dz/dx, dz/dy)
955 # * Subscript c denotes 'condensated' (imposed) degrees of freedom
956 # (i.e. z at all nodes)
957 # * F = [Ff, Fc] is the force vector
958 # * U = [Uf, Uc] is the imposed dof vector
        #             [ Kff Kfc ]
        # * K =       [         ]  is the laplacian stiffness matrix
        #             [ Kcf Kcc ]
962 # * As F = K x U one gets straightforwardly: Ff = - Kfc x Uc
963
964 # Computing Kff stiffness matrix in sparse coo format
965 Kff_vals = np.ravel(K_elem[np.ix_(vec_range, f_dof, f_dof)])
966 Kff_rows = np.ravel(f_row_indices[np.ix_(vec_range, f_dof, f_dof)])
967 Kff_cols = np.ravel(f_col_indices[np.ix_(vec_range, f_dof, f_dof)])
968
969 # Computing Ff force vector in sparse coo format
970 Kfc_elem = K_elem[np.ix_(vec_range, f_dof, c_dof)]
971 Uc_elem = np.expand_dims(Uc, axis=2)
972 Ff_elem = -(Kfc_elem @ Uc_elem)[:, :, 0]
973 Ff_indices = f_dof_indices[np.ix_(vec_range, [0], f_dof)][:, 0, :]
974
975 # Extracting Ff force vector in dense format
976 # We have to sum duplicate indices - using bincount
977 Ff = np.bincount(np.ravel(Ff_indices), weights=np.ravel(Ff_elem))
978 return Kff_rows, Kff_cols, Kff_vals, Ff
979
980
981# :class:_DOF_estimator, _DOF_estimator_user, _DOF_estimator_geom,
982# _DOF_estimator_min_E
983# Private classes used to compute the degree of freedom of each triangular
984# element for the TriCubicInterpolator.
985class _DOF_estimator:
986 """
987 Abstract base class for classes used to estimate a function's first
988 derivatives, and deduce the dofs for a CubicTriInterpolator using a
989 reduced HCT element formulation.
990
    Derived classes implement ``compute_dz(self, **kwargs)``, returning
    ``np.vstack([dfx, dfy]).T`` where ``dfx, dfy`` are the estimates of the 2
    gradient coordinates.
994 """
995 def __init__(self, interpolator, **kwargs):
996 _api.check_isinstance(CubicTriInterpolator, interpolator=interpolator)
997 self._pts = interpolator._pts
998 self._tris_pts = interpolator._tris_pts
999 self.z = interpolator._z
1000 self._triangles = interpolator._triangles
1001 (self._unit_x, self._unit_y) = (interpolator._unit_x,
1002 interpolator._unit_y)
1003 self.dz = self.compute_dz(**kwargs)
1004 self.compute_dof_from_df()
1005
1006 def compute_dz(self, **kwargs):
1007 raise NotImplementedError
1008
1009 def compute_dof_from_df(self):
1010 """
1011 Compute reduced-HCT elements degrees of freedom, from the gradient.
1012 """
1013 J = CubicTriInterpolator._get_jacobian(self._tris_pts)
1014 tri_z = self.z[self._triangles]
1015 tri_dz = self.dz[self._triangles]
1016 tri_dof = self.get_dof_vec(tri_z, tri_dz, J)
1017 return tri_dof
1018
1019 @staticmethod
1020 def get_dof_vec(tri_z, tri_dz, J):
1021 """
1022 Compute the dof vector of a triangle, from the value of f, df and
1023 of the local Jacobian at each node.
1024
1025 Parameters
1026 ----------
1027 tri_z : shape (3,) array
1028 f nodal values.
1029 tri_dz : shape (3, 2) array
1030 df/dx, df/dy nodal values.
1031 J
1032 Jacobian matrix in local basis of apex 0.
1033
1034 Returns
1035 -------
1036 dof : shape (9,) array
1037 For each apex ``iapex``::
1038
1039 dof[iapex*3+0] = f(Ai)
1040 dof[iapex*3+1] = df(Ai).(AiAi+)
1041 dof[iapex*3+2] = df(Ai).(AiAi-)
1042 """
1043 npt = tri_z.shape[0]
1044 dof = np.zeros([npt, 9], dtype=np.float64)
1045 J1 = _ReducedHCT_Element.J0_to_J1 @ J
1046 J2 = _ReducedHCT_Element.J0_to_J2 @ J
1047
1048 col0 = J @ np.expand_dims(tri_dz[:, 0, :], axis=2)
1049 col1 = J1 @ np.expand_dims(tri_dz[:, 1, :], axis=2)
1050 col2 = J2 @ np.expand_dims(tri_dz[:, 2, :], axis=2)
1051
1052 dfdksi = _to_matrix_vectorized([
1053 [col0[:, 0, 0], col1[:, 0, 0], col2[:, 0, 0]],
1054 [col0[:, 1, 0], col1[:, 1, 0], col2[:, 1, 0]]])
1055 dof[:, 0:7:3] = tri_z
1056 dof[:, 1:8:3] = dfdksi[:, 0]
1057 dof[:, 2:9:3] = dfdksi[:, 1]
1058 return dof
1059
1060
1061class _DOF_estimator_user(_DOF_estimator):
1062 """dz is imposed by user; accounts for scaling if any."""
1063
1064 def compute_dz(self, dz):
1065 (dzdx, dzdy) = dz
1066 dzdx = dzdx * self._unit_x
1067 dzdy = dzdy * self._unit_y
1068 return np.vstack([dzdx, dzdy]).T
1069
1070
1071class _DOF_estimator_geom(_DOF_estimator):
1072 """Fast 'geometric' approximation, recommended for large arrays."""
1073
1074 def compute_dz(self):
1075 """
        self.df is computed as a weighted average of the triangles sharing a
        common node. On each triangle itri, f is first assumed linear (= ~f),
        which allows computing d~f[itri].
        The following approximation of df nodal values is then proposed:
            df[ipt] = SUM ( w[itri] x d~f[itri] , for itri sharing apex ipt)
        The weighting coefficients w[itri] are proportional to the angle of
        the triangle itri at apex ipt.
1083 """
1084 el_geom_w = self.compute_geom_weights()
1085 el_geom_grad = self.compute_geom_grads()
1086
1087 # Sum of weights coeffs
1088 w_node_sum = np.bincount(np.ravel(self._triangles),
1089 weights=np.ravel(el_geom_w))
1090
1091 # Sum of weighted df = (dfx, dfy)
1092 dfx_el_w = np.empty_like(el_geom_w)
1093 dfy_el_w = np.empty_like(el_geom_w)
1094 for iapex in range(3):
1095 dfx_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 0]
1096 dfy_el_w[:, iapex] = el_geom_w[:, iapex]*el_geom_grad[:, 1]
1097 dfx_node_sum = np.bincount(np.ravel(self._triangles),
1098 weights=np.ravel(dfx_el_w))
1099 dfy_node_sum = np.bincount(np.ravel(self._triangles),
1100 weights=np.ravel(dfy_el_w))
1101
1102 # Estimation of df
1103 dfx_estim = dfx_node_sum/w_node_sum
1104 dfy_estim = dfy_node_sum/w_node_sum
1105 return np.vstack([dfx_estim, dfy_estim]).T
1106
1107 def compute_geom_weights(self):
1108 """
        Build the (nelems, 3) weights coeffs of triangle angles,
        renormalized so that np.sum(weights, axis=1) == np.ones(nelems).
1111 """
1112 weights = np.zeros([np.size(self._triangles, 0), 3])
1113 tris_pts = self._tris_pts
1114 for ipt in range(3):
1115 p0 = tris_pts[:, ipt % 3, :]
1116 p1 = tris_pts[:, (ipt+1) % 3, :]
1117 p2 = tris_pts[:, (ipt-1) % 3, :]
1118 alpha1 = np.arctan2(p1[:, 1]-p0[:, 1], p1[:, 0]-p0[:, 0])
1119 alpha2 = np.arctan2(p2[:, 1]-p0[:, 1], p2[:, 0]-p0[:, 0])
1120 # In the below formula we could take modulo 2. but
1121 # modulo 1. is safer regarding round-off errors (flat triangles).
1122 angle = np.abs(((alpha2-alpha1) / np.pi) % 1)
            # Weight proportional to angle up to np.pi/2; null weight for
1124 # degenerated cases 0 and np.pi (note that *angle* is normalized
1125 # by np.pi).
1126 weights[:, ipt] = 0.5 - np.abs(angle-0.5)
1127 return weights
1128
1129 def compute_geom_grads(self):
1130 """
        Compute the (global) gradient component of f assumed linear (~f).
        Returns array df of shape (nelems, 2).
        df[ielem].dM[ielem] = dz[ielem] i.e. df = dz x dM^-1 = dM.T^-1 x dz
1134 """
1135 tris_pts = self._tris_pts
1136 tris_f = self.z[self._triangles]
1137
1138 dM1 = tris_pts[:, 1, :] - tris_pts[:, 0, :]
1139 dM2 = tris_pts[:, 2, :] - tris_pts[:, 0, :]
1140 dM = np.dstack([dM1, dM2])
1141 # Here we try to deal with the simplest colinear cases: a null
1142 # gradient is assumed in this case.
1143 dM_inv = _safe_inv22_vectorized(dM)
1144
1145 dZ1 = tris_f[:, 1] - tris_f[:, 0]
1146 dZ2 = tris_f[:, 2] - tris_f[:, 0]
1147 dZ = np.vstack([dZ1, dZ2]).T
1148 df = np.empty_like(dZ)
1149
1150 # With np.einsum: could be ej,eji -> ej
1151 df[:, 0] = dZ[:, 0]*dM_inv[:, 0, 0] + dZ[:, 1]*dM_inv[:, 1, 0]
1152 df[:, 1] = dZ[:, 0]*dM_inv[:, 0, 1] + dZ[:, 1]*dM_inv[:, 1, 1]
1153 return df
1154
1155
1156class _DOF_estimator_min_E(_DOF_estimator_geom):
1157 """
1158 The 'smoothest' approximation, df is computed through global minimization
1159 of the bending energy:
    E(f) = integral[(d2z/dx2)**2 + (d2z/dy2)**2 + 2*(d2z/dxdy)**2 dA]
1161 """
1162 def __init__(self, Interpolator):
1163 self._eccs = Interpolator._eccs
1164 super().__init__(Interpolator)
1165
1166 def compute_dz(self):
1167 """
1168 Elliptic solver for bending energy minimization.
1169 Uses a dedicated 'toy' sparse Jacobi PCG solver.
1170 """
1171 # Initial guess for iterative PCG solver.
1172 dz_init = super().compute_dz()
1173 Uf0 = np.ravel(dz_init)
1174
1175 reference_element = _ReducedHCT_Element()
1176 J = CubicTriInterpolator._get_jacobian(self._tris_pts)
1177 eccs = self._eccs
1178 triangles = self._triangles
1179 Uc = self.z[self._triangles]
1180
1181 # Building stiffness matrix and force vector in coo format
1182 Kff_rows, Kff_cols, Kff_vals, Ff = reference_element.get_Kff_and_Ff(
1183 J, eccs, triangles, Uc)
1184
1185 # Building sparse matrix and solving minimization problem
1186 # We could use scipy.sparse direct solver; however to avoid this
1187 # external dependency an implementation of a simple PCG solver with
1188 # a simple diagonal Jacobi preconditioner is implemented.
1189 tol = 1.e-10
1190 n_dof = Ff.shape[0]
1191 Kff_coo = _Sparse_Matrix_coo(Kff_vals, Kff_rows, Kff_cols,
1192 shape=(n_dof, n_dof))
1193 Kff_coo.compress_csc()
1194 Uf, err = _cg(A=Kff_coo, b=Ff, x0=Uf0, tol=tol)
1195 # If the PCG did not converge, we return the best guess between Uf0
1196 # and Uf.
1197 err0 = np.linalg.norm(Kff_coo.dot(Uf0) - Ff)
1198 if err0 < err:
            # Fall back to the 'geom' initial guess and warn the user.
1200 _api.warn_external("In TriCubicInterpolator initialization, "
1201 "PCG sparse solver did not converge after "
1202 "1000 iterations. `geom` approximation is "
1203 "used instead of `min_E`")
1204 Uf = Uf0
1205
1206 # Building dz from Uf
1207 dz = np.empty([self._pts.shape[0], 2], dtype=np.float64)
1208 dz[:, 0] = Uf[::2]
1209 dz[:, 1] = Uf[1::2]
1210 return dz
1211
1212
1213# The following private :class:_Sparse_Matrix_coo and :func:_cg provide
1214# a PCG sparse solver for (symmetric) elliptic problems.
1215class _Sparse_Matrix_coo:
1216 def __init__(self, vals, rows, cols, shape):
1217 """
1218 Create a sparse matrix in coo format.
1219 *vals*: arrays of values of non-null entries of the matrix
1220 *rows*: int arrays of rows of non-null entries of the matrix
1221 *cols*: int arrays of cols of non-null entries of the matrix
1222 *shape*: 2-tuple (n, m) of matrix shape
1223 """
1224 self.n, self.m = shape
1225 self.vals = np.asarray(vals, dtype=np.float64)
1226 self.rows = np.asarray(rows, dtype=np.int32)
1227 self.cols = np.asarray(cols, dtype=np.int32)
1228
1229 def dot(self, V):
1230 """
1231 Dot product of self by a vector *V* in sparse-dense to dense format
1232 *V* dense vector of shape (self.m,).
1233 """
1234 assert V.shape == (self.m,)
        return np.bincount(self.rows,
                           weights=self.vals*V[self.cols],
                           minlength=self.n)
1238
1239 def compress_csc(self):
1240 """
1241 Compress rows, cols, vals / summing duplicates. Sort for csc format.
1242 """
1243 _, unique, indices = np.unique(
1244 self.rows + self.n*self.cols,
1245 return_index=True, return_inverse=True)
1246 self.rows = self.rows[unique]
1247 self.cols = self.cols[unique]
1248 self.vals = np.bincount(indices, weights=self.vals)
1249
1250 def compress_csr(self):
1251 """
1252 Compress rows, cols, vals / summing duplicates. Sort for csr format.
1253 """
1254 _, unique, indices = np.unique(
1255 self.m*self.rows + self.cols,
1256 return_index=True, return_inverse=True)
1257 self.rows = self.rows[unique]
1258 self.cols = self.cols[unique]
1259 self.vals = np.bincount(indices, weights=self.vals)
1260
1261 def to_dense(self):
1262 """
1263 Return a dense matrix representing self, mainly for debugging purposes.
1264 """
1265 ret = np.zeros([self.n, self.m], dtype=np.float64)
1266 nvals = self.vals.size
1267 for i in range(nvals):
1268 ret[self.rows[i], self.cols[i]] += self.vals[i]
1269 return ret
1270
1271 def __str__(self):
1272 return self.to_dense().__str__()
1273
1274 @property
1275 def diag(self):
1276 """Return the (dense) vector of the diagonal elements."""
1277 in_diag = (self.rows == self.cols)
        diag = np.zeros(min(self.n, self.m), dtype=np.float64)  # default 0.
1279 diag[self.rows[in_diag]] = self.vals[in_diag]
1280 return diag
1281
1282
1283def _cg(A, b, x0=None, tol=1.e-10, maxiter=1000):
1284 """
1285 Use Preconditioned Conjugate Gradient iteration to solve A x = b
1286 A simple Jacobi (diagonal) preconditioner is used.
1287
1288 Parameters
1289 ----------
1290 A : _Sparse_Matrix_coo
1291 *A* must have been compressed before by compress_csc or
1292 compress_csr method.
1293 b : array
1294 Right hand side of the linear system.
1295 x0 : array, optional
1296 Starting guess for the solution. Defaults to the zero vector.
1297 tol : float, optional
1298 Tolerance to achieve. The algorithm terminates when the relative
1299 residual is below tol. Default is 1e-10.
1300 maxiter : int, optional
1301 Maximum number of iterations. Iteration will stop after *maxiter*
1302 steps even if the specified tolerance has not been achieved. Defaults
1303 to 1000.
1304
1305 Returns
1306 -------
1307 x : array
1308 The converged solution.
1309 err : float
1310 The absolute error np.linalg.norm(A.dot(x) - b)
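
    Examples
    --------
    Illustrative sketch on a tiny, arbitrarily chosen 2 x 2 system::

        vals = [4., 1., 1., 3.]
        rows = [0, 0, 1, 1]
        cols = [0, 1, 0, 1]
        A = _Sparse_Matrix_coo(vals, rows, cols, shape=(2, 2))
        A.compress_csc()
        x, err = _cg(A, b=np.array([1., 2.]))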
1311 """
1312 n = b.size
1313 assert A.n == n
1314 assert A.m == n
1315 b_norm = np.linalg.norm(b)
1316
1317 # Jacobi pre-conditioner
1318 kvec = A.diag
1319 # For diag elem < 1e-6 we keep 1e-6.
1320 kvec = np.maximum(kvec, 1e-6)
1321
1322 # Initial guess
1323 if x0 is None:
1324 x = np.zeros(n)
1325 else:
1326 x = x0
1327
1328 r = b - A.dot(x)
1329 w = r/kvec
1330
1331 p = np.zeros(n)
1332 beta = 0.0
1333 rho = np.dot(r, w)
1334 k = 0
1335
1336 # Following C. T. Kelley
1337 while (np.sqrt(abs(rho)) > tol*b_norm) and (k < maxiter):
1338 p = w + beta*p
1339 z = A.dot(p)
1340 alpha = rho/np.dot(p, z)
1341 r = r - alpha*z
1342 w = r/kvec
1343 rhoold = rho
1344 rho = np.dot(r, w)
1345 x = x + alpha*p
1346 beta = rho/rhoold
1347 # err = np.linalg.norm(A.dot(x) - b) # absolute accuracy - not used
1348 k += 1
1349 err = np.linalg.norm(A.dot(x) - b)
1350 return x, err
1351
1352
1353# The following private functions:
1354# :func:`_safe_inv22_vectorized`
1355# :func:`_pseudo_inv22sym_vectorized`
1356# :func:`_scalar_vectorized`
1357# :func:`_transpose_vectorized`
1358# :func:`_roll_vectorized`
1359# :func:`_to_matrix_vectorized`
1360# :func:`_extract_submatrices`
1361# provide fast numpy implementation of some standard operations on arrays of
1362# matrices - stored as (:, n_rows, n_cols)-shaped np.arrays.
1363
1364# Development note: Dealing with pathologic 'flat' triangles in the
1365# CubicTriInterpolator code and impact on (2, 2)-matrix inversion functions
1366# :func:`_safe_inv22_vectorized` and :func:`_pseudo_inv22sym_vectorized`.
1367#
1368# Goals:
1369# 1) The CubicTriInterpolator should be able to handle flat or almost flat
1370# triangles without raising an error,
1371# 2) These degenerated triangles should have no impact on the automatic dof
1372# calculation (associated with null weight for the _DOF_estimator_geom and
1373# with null energy for the _DOF_estimator_min_E),
1374# 3) Linear patch test should be passed exactly on degenerated meshes,
1375# 4) Interpolation (with :meth:`_interpolate_single_key` or
#    :meth:`_interpolate_multikeys`) shall be correctly handled even *inside*
1377# the pathologic triangles, to interact correctly with a TriRefiner class.
1378#
1379# Difficulties:
1380# Flat triangles have rank-deficient *J* (so-called jacobian matrix) and
1381# *metric* (the metric tensor = J x J.T). Computation of the local
1382# tangent plane is also problematic.
1383#
1384# Implementation:
1385# Most of the time, when computing the inverse of a rank-deficient matrix it
1386# is safe to simply return the null matrix (which is the implementation in
1387# :func:`_safe_inv22_vectorized`). This is because of point 2), itself
1388# enforced by:
1389# - null area hence null energy in :class:`_DOF_estimator_min_E`
1390# - angles close or equal to 0 or np.pi hence null weight in
1391# :class:`_DOF_estimator_geom`.
1392# Note that the function angle -> weight is continuous and maximum for an
1393# angle np.pi/2 (refer to :meth:`compute_geom_weights`)
1394# The exception is the computation of barycentric coordinates, which is done
1395# by inversion of the *metric* matrix. In this case, we need to compute a set
1396# of valid coordinates (1 among numerous possibilities), to ensure point 4).
1397# We benefit here from the symmetry of metric = J x J.T, which makes it easier
1398# to compute a pseudo-inverse in :func:`_pseudo_inv22sym_vectorized`
1399def _safe_inv22_vectorized(M):
1400 """
1401 Inversion of arrays of (2, 2) matrices, returns 0 for rank-deficient
1402 matrices.
1403
1404 *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
1405 """
1406 _api.check_shape((None, 2, 2), M=M)
1407 M_inv = np.empty_like(M)
1408 prod1 = M[:, 0, 0]*M[:, 1, 1]
1409 delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
1410
1411 # We set delta_inv to 0. in case of a rank deficient matrix; a
1412 # rank-deficient input matrix *M* will lead to a null matrix in output
1413 rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
1414 if np.all(rank2):
1415 # Normal 'optimized' flow.
1416 delta_inv = 1./delta
1417 else:
1418 # 'Pathologic' flow.
1419 delta_inv = np.zeros(M.shape[0])
1420 delta_inv[rank2] = 1./delta[rank2]
1421
1422 M_inv[:, 0, 0] = M[:, 1, 1]*delta_inv
1423 M_inv[:, 0, 1] = -M[:, 0, 1]*delta_inv
1424 M_inv[:, 1, 0] = -M[:, 1, 0]*delta_inv
1425 M_inv[:, 1, 1] = M[:, 0, 0]*delta_inv
1426 return M_inv
1427
1428
1429def _pseudo_inv22sym_vectorized(M):
1430 """
1431 Inversion of arrays of (2, 2) SYMMETRIC matrices; returns the
1432 (Moore-Penrose) pseudo-inverse for rank-deficient matrices.
1433
1434 In case M is of rank 1, we have M = trace(M) x P where P is the orthogonal
1435 projection on Im(M), and we return trace(M)^-1 x P == M / trace(M)**2
1436 In case M is of rank 0, we return the null matrix.
1437
1438 *M* : array of (2, 2) matrices to inverse, shape (n, 2, 2)
1439 """
1440 _api.check_shape((None, 2, 2), M=M)
1441 M_inv = np.empty_like(M)
1442 prod1 = M[:, 0, 0]*M[:, 1, 1]
1443 delta = prod1 - M[:, 0, 1]*M[:, 1, 0]
1444 rank2 = (np.abs(delta) > 1e-8*np.abs(prod1))
1445
1446 if np.all(rank2):
1447 # Normal 'optimized' flow.
1448 M_inv[:, 0, 0] = M[:, 1, 1] / delta
1449 M_inv[:, 0, 1] = -M[:, 0, 1] / delta
1450 M_inv[:, 1, 0] = -M[:, 1, 0] / delta
1451 M_inv[:, 1, 1] = M[:, 0, 0] / delta
1452 else:
1453 # 'Pathologic' flow.
1454 # Here we have to deal with 2 sub-cases
1455 # 1) First sub-case: matrices of rank 2:
1456 delta = delta[rank2]
1457 M_inv[rank2, 0, 0] = M[rank2, 1, 1] / delta
1458 M_inv[rank2, 0, 1] = -M[rank2, 0, 1] / delta
1459 M_inv[rank2, 1, 0] = -M[rank2, 1, 0] / delta
1460 M_inv[rank2, 1, 1] = M[rank2, 0, 0] / delta
1461 # 2) Second sub-case: rank-deficient matrices of rank 0 and 1:
1462 rank01 = ~rank2
1463 tr = M[rank01, 0, 0] + M[rank01, 1, 1]
1464 tr_zeros = (np.abs(tr) < 1.e-8)
1465 sq_tr_inv = (1.-tr_zeros) / (tr**2+tr_zeros)
1466 # sq_tr_inv = 1. / tr**2
1467 M_inv[rank01, 0, 0] = M[rank01, 0, 0] * sq_tr_inv
1468 M_inv[rank01, 0, 1] = M[rank01, 0, 1] * sq_tr_inv
1469 M_inv[rank01, 1, 0] = M[rank01, 1, 0] * sq_tr_inv
1470 M_inv[rank01, 1, 1] = M[rank01, 1, 1] * sq_tr_inv
1471
1472 return M_inv
1473
1474
1475def _scalar_vectorized(scalar, M):
1476 """
1477 Scalar product between scalars and matrices.
1478 """
1479 return scalar[:, np.newaxis, np.newaxis]*M
1480
1481
1482def _transpose_vectorized(M):
1483 """
1484 Transposition of an array of matrices *M*.
1485 """
1486 return np.transpose(M, [0, 2, 1])
1487
1488
1489def _roll_vectorized(M, roll_indices, axis):
1490 """
1491 Roll an array of matrices along *axis* (0: rows, 1: columns) according to
1492 an array of indices *roll_indices*.
1493 """
1494 assert axis in [0, 1]
1495 ndim = M.ndim
1496 assert ndim == 3
1497 ndim_roll = roll_indices.ndim
1498 assert ndim_roll == 1
1499 sh = M.shape
1500 r, c = sh[-2:]
1501 assert sh[0] == roll_indices.shape[0]
1502 vec_indices = np.arange(sh[0], dtype=np.int32)
1503
1504 # Builds the rolled matrix
1505 M_roll = np.empty_like(M)
1506 if axis == 0:
1507 for ir in range(r):
1508 for ic in range(c):
1509 M_roll[:, ir, ic] = M[vec_indices, (-roll_indices+ir) % r, ic]
1510 else: # 1
1511 for ir in range(r):
1512 for ic in range(c):
1513 M_roll[:, ir, ic] = M[vec_indices, ir, (-roll_indices+ic) % c]
1514 return M_roll
1515
1516
1517def _to_matrix_vectorized(M):
1518 """
    Build an array of matrices from individual np.arrays of identical shapes.

    Parameters
    ----------
    M
        nrows-list of ncols-lists of arrays of shape sh.

    Returns
    -------
    M_res : np.array of shape (sh, nrows, ncols)
        *M_res* satisfies ``M_res[..., i, j] = M[i][j]``.
1530 """
1531 assert isinstance(M, (tuple, list))
1532 assert all(isinstance(item, (tuple, list)) for item in M)
1533 c_vec = np.asarray([len(item) for item in M])
1534 assert np.all(c_vec-c_vec[0] == 0)
1535 r = len(M)
1536 c = c_vec[0]
1537 M00 = np.asarray(M[0][0])
1538 dt = M00.dtype
1539 sh = [M00.shape[0], r, c]
1540 M_ret = np.empty(sh, dtype=dt)
1541 for irow in range(r):
1542 for icol in range(c):
1543 M_ret[:, irow, icol] = np.asarray(M[irow][icol])
1544 return M_ret
1545
1546
1547def _extract_submatrices(M, block_indices, block_size, axis):
1548 """
    Extract selected blocks of a matrix *M* depending on parameters
1550 *block_indices* and *block_size*.
1551
1552 Returns the array of extracted matrices *Mres* so that ::
1553
1554 M_res[..., ir, :] = M[(block_indices*block_size+ir), :]
1555 """
1556 assert block_indices.ndim == 1
1557 assert axis in [0, 1]
1558
1559 r, c = M.shape
1560 if axis == 0:
1561 sh = [block_indices.shape[0], block_size, c]
1562 else: # 1
1563 sh = [block_indices.shape[0], r, block_size]
1564
1565 dt = M.dtype
1566 M_res = np.empty(sh, dtype=dt)
1567 if axis == 0:
1568 for ir in range(block_size):
1569 M_res[:, ir, :] = M[(block_indices*block_size+ir), :]
1570 else: # 1
1571 for ic in range(block_size):
1572 M_res[:, :, ic] = M[:, (block_indices*block_size+ic)]
1573
1574 return M_res