from itertools import groupby
from warnings import warn
import numpy as np
from scipy.sparse import find, coo_matrix


EPS = np.finfo(float).eps


def validate_first_step(first_step, t0, t_bound):
    """Assert that first_step is valid and return it."""
    if first_step <= 0:
        raise ValueError("`first_step` must be positive.")
    if first_step > np.abs(t_bound - t0):
        raise ValueError("`first_step` exceeds bounds.")
    return first_step


def validate_max_step(max_step):
    """Assert that max_step is valid and return it."""
    if max_step <= 0:
        raise ValueError("`max_step` must be positive.")
    return max_step
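

# A minimal doctest-style illustration of the two validators above (the
# numeric values are arbitrary, chosen only for this sketch):
#
#   >>> validate_max_step(0.1)
#   0.1
#   >>> validate_first_step(0.05, 0.0, 1.0)
#   0.05
#   >>> validate_first_step(2.0, 0.0, 1.0)
#   Traceback (most recent call last):
#       ...
#   ValueError: `first_step` exceeds bounds.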


def warn_extraneous(extraneous):
    """Display a warning for extraneous keyword arguments.

    The initializer of each solver class is expected to collect keyword
    arguments that it doesn't understand and warn about them. This function
    prints a warning for each key in the supplied dictionary.

    Parameters
    ----------
    extraneous : dict
        Extraneous keyword arguments
    """
    if extraneous:
        warn("The following arguments have no effect for a chosen solver: {}."
             .format(", ".join("`{}`".format(x) for x in extraneous)))
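

# For example, a hypothetical solver called with an unsupported keyword
# (`max_iter` is made up for this sketch) would emit:
#
#   >>> warn_extraneous({'max_iter': 10})  # doctest: +SKIP
#   UserWarning: The following arguments have no effect for a chosen
#   solver: `max_iter`.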


def validate_tol(rtol, atol, n):
    """Validate tolerance values."""

    if np.any(rtol < 100 * EPS):
        warn("At least one element of `rtol` is too small. "
             f"Setting `rtol = np.maximum(rtol, {100 * EPS})`.")
        rtol = np.maximum(rtol, 100 * EPS)

    atol = np.asarray(atol)
    if atol.ndim > 0 and atol.shape != (n,):
        raise ValueError("`atol` has wrong shape.")

    if np.any(atol < 0):
        raise ValueError("`atol` must be positive.")

    return rtol, atol
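

# Typical use, assuming a system of n = 2 equations; a too-small `rtol` is
# clipped to 100 * EPS with a warning:
#
#   >>> rtol, atol = validate_tol(1e-3, 1e-6, 2)   # returned unchanged
#   >>> rtol, atol = validate_tol(1e-20, 1e-6, 2)  # warns, rtol ~ 2.2e-14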


def norm(x):
    """Compute RMS norm."""
    return np.linalg.norm(x) / x.size ** 0.5
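

# `norm` is the root-mean-square norm: np.linalg.norm(x) / sqrt(x.size) is
# the same as sqrt(np.mean(x ** 2)). A quick check:
#
#   >>> norm(np.array([3.0, 4.0]))   # 5 / sqrt(2)
#   3.5355339059327378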


def select_initial_step(fun, t0, y0, f0, direction, order, rtol, atol):
    """Empirically select a good initial step.

    The algorithm is described in [1]_.

    Parameters
    ----------
    fun : callable
        Right-hand side of the system.
    t0 : float
        Initial value of the independent variable.
    y0 : ndarray, shape (n,)
        Initial value of the dependent variable.
    f0 : ndarray, shape (n,)
        Initial value of the derivative, i.e., ``fun(t0, y0)``.
    direction : float
        Integration direction.
    order : float
        Error estimator order. It means that the error controlled by the
        algorithm is proportional to ``step_size ** (order + 1)``.
    rtol : float
        Desired relative tolerance.
    atol : float
        Desired absolute tolerance.

    Returns
    -------
    h_abs : float
        Absolute value of the suggested initial step.

    References
    ----------
    .. [1] E. Hairer, S. P. Norsett and G. Wanner, "Solving Ordinary
           Differential Equations I: Nonstiff Problems", Sec. II.4.
    """
    if y0.size == 0:
        return np.inf

    scale = atol + np.abs(y0) * rtol
    d0 = norm(y0 / scale)
    d1 = norm(f0 / scale)
    if d0 < 1e-5 or d1 < 1e-5:
        h0 = 1e-6
    else:
        h0 = 0.01 * d0 / d1

    y1 = y0 + h0 * direction * f0
    f1 = fun(t0 + h0 * direction, y1)
    d2 = norm((f1 - f0) / scale) / h0

    if d1 <= 1e-15 and d2 <= 1e-15:
        h1 = max(1e-6, h0 * 1e-3)
    else:
        h1 = (0.01 / max(d1, d2)) ** (1 / (order + 1))

    return min(100 * h0, h1)
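

# Worked example for the scalar decaying ODE y' = -y with y(0) = 1 (order=4
# matches an RK45-style error estimator; the numbers simply follow the
# algorithm above): scale ~ 1e-3, d0 ~ d1 ~ 999, so h0 = 0.01; then d2 ~ 999
# and h1 = (0.01 / 999) ** (1/5) ~ 0.1, giving min(100 * h0, h1) ~ 0.1.
#
#   >>> fun = lambda t, y: -y
#   >>> y0 = np.array([1.0])
#   >>> h = select_initial_step(fun, 0.0, y0, fun(0.0, y0), 1, 4, 1e-3, 1e-6)
#   >>> round(h, 3)
#   0.1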


class OdeSolution:
    """Continuous ODE solution.

    It is organized as a collection of `DenseOutput` objects which represent
    local interpolants. It provides an algorithm to select the right
    interpolant for each given point.

    The interpolants cover the range between `t_min` and `t_max` (see
    Attributes below). Evaluation outside this interval is not forbidden, but
    the accuracy is not guaranteed.

    When evaluating at a breakpoint (one of the values in `ts`) a segment with
    the lower index is selected.

    Parameters
    ----------
    ts : array_like, shape (n_segments + 1,)
        Time instants between which local interpolants are defined. Must
        be strictly increasing or decreasing (a zero-length segment defined
        by two equal points is also allowed).
    interpolants : list of DenseOutput with n_segments elements
        Local interpolants. The i-th interpolant is assumed to be defined
        between ``ts[i]`` and ``ts[i + 1]``.

    Attributes
    ----------
    t_min, t_max : float
        Time range of the interpolation.
    """
    def __init__(self, ts, interpolants):
        ts = np.asarray(ts)
        d = np.diff(ts)
        # The first case covers integration on a zero-length segment.
        if not ((ts.size == 2 and ts[0] == ts[-1])
                or np.all(d > 0) or np.all(d < 0)):
            raise ValueError("`ts` must be strictly increasing or decreasing.")

        self.n_segments = len(interpolants)
        if ts.shape != (self.n_segments + 1,):
            raise ValueError("Numbers of time stamps and interpolants "
                             "don't match.")

        self.ts = ts
        self.interpolants = interpolants
        if ts[-1] >= ts[0]:
            self.t_min = ts[0]
            self.t_max = ts[-1]
            self.ascending = True
            self.ts_sorted = ts
        else:
            self.t_min = ts[-1]
            self.t_max = ts[0]
            self.ascending = False
            self.ts_sorted = ts[::-1]

    def _call_single(self, t):
        # Preserve the symmetry described in the class docstring: when t is
        # in self.ts, prioritize the segment with the lower index.
        if self.ascending:
            ind = np.searchsorted(self.ts_sorted, t, side='left')
        else:
            ind = np.searchsorted(self.ts_sorted, t, side='right')

        segment = min(max(ind - 1, 0), self.n_segments - 1)
        if not self.ascending:
            segment = self.n_segments - 1 - segment

        return self.interpolants[segment](t)

    def __call__(self, t):
        """Evaluate the solution.

        Parameters
        ----------
        t : float or array_like with shape (n_points,)
            Points to evaluate at.

        Returns
        -------
        y : ndarray, shape (n_states,) or (n_states, n_points)
            Computed values. Shape depends on whether `t` is a scalar or a
            1-D array.
        """
        t = np.asarray(t)

        if t.ndim == 0:
            return self._call_single(t)

        order = np.argsort(t)
        reverse = np.empty_like(order)
        reverse[order] = np.arange(order.shape[0])
        t_sorted = t[order]

        # See comment in self._call_single.
        if self.ascending:
            segments = np.searchsorted(self.ts_sorted, t_sorted, side='left')
        else:
            segments = np.searchsorted(self.ts_sorted, t_sorted, side='right')
        segments -= 1
        segments[segments < 0] = 0
        segments[segments > self.n_segments - 1] = self.n_segments - 1
        if not self.ascending:
            segments = self.n_segments - 1 - segments

        ys = []
        group_start = 0
        for segment, group in groupby(segments):
            group_end = group_start + len(list(group))
            y = self.interpolants[segment](t_sorted[group_start:group_end])
            ys.append(y)
            group_start = group_end

        ys = np.hstack(ys)
        ys = ys[:, reverse]

        return ys
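

# Sketch of segment selection, with plain callables standing in for the
# DenseOutput interpolants a solver would normally supply (the piecewise
# linear functions below are made up for this illustration):
#
#   >>> interpolants = [lambda t: np.atleast_2d(t),          # y = t on [0, 1]
#   ...                 lambda t: np.atleast_2d(2 * t - 1)]  # y = 2t - 1 on [1, 2]
#   >>> sol = OdeSolution([0.0, 1.0, 2.0], interpolants)
#   >>> sol(np.array([0.5, 1.0, 1.5]))  # t = 1.0 selects the lower segment
#   array([[0.5, 1. , 2. ]])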


# Parameters of the adaptive step-size selection in num_jac: the DIFF_*
# thresholds bracket acceptable finite-difference magnitudes relative to the
# round-off level, and the FACTOR_* values rescale the step between calls.
NUM_JAC_DIFF_REJECT = EPS ** 0.875
NUM_JAC_DIFF_SMALL = EPS ** 0.75
NUM_JAC_DIFF_BIG = EPS ** 0.25
NUM_JAC_MIN_FACTOR = 1e3 * EPS
NUM_JAC_FACTOR_INCREASE = 10
NUM_JAC_FACTOR_DECREASE = 0.1


def num_jac(fun, t, y, f, threshold, factor, sparsity=None):
    """Finite differences Jacobian approximation tailored for ODE solvers.

    This function computes a finite difference approximation to the Jacobian
    matrix of `fun` with respect to `y` using forward differences.
    The Jacobian matrix has shape (n, n) and its element (i, j) is equal to
    ``d f_i / d y_j``.

    A special feature of this function is the ability to correct the step
    size from iteration to iteration. The main idea is to keep the finite
    difference significantly separated from its round-off error, which
    approximately equals ``EPS * np.abs(f)``. This reduces the possibility of
    a huge error and ensures that the estimated derivatives are reasonably
    close to the true values (i.e., the finite difference approximation at
    least qualitatively reflects the structure of the true Jacobian).

    Parameters
    ----------
    fun : callable
        Right-hand side of the system implemented in a vectorized fashion.
    t : float
        Current time.
    y : ndarray, shape (n,)
        Current state.
    f : ndarray, shape (n,)
        Value of the right hand side at (t, y).
    threshold : float
        Threshold for `y` value used for computing the step size as
        ``factor * np.maximum(np.abs(y), threshold)``. Typically, the value of
        absolute tolerance (atol) for a solver should be passed as `threshold`.
    factor : ndarray with shape (n,) or None
        Factor to use for computing the step size. Pass None for the very
        first evaluation, then use the value returned from this function.
    sparsity : tuple (structure, groups) or None
        Sparsity structure of the Jacobian, `structure` must be csc_matrix.

    Returns
    -------
    J : ndarray or csc_matrix, shape (n, n)
        Jacobian matrix.
    factor : ndarray, shape (n,)
        Suggested `factor` for the next evaluation.
    """
    y = np.asarray(y)
    n = y.shape[0]
    if n == 0:
        return np.empty((0, 0)), factor

    if factor is None:
        factor = np.full(n, EPS ** 0.5)
    else:
        factor = factor.copy()

    # Direct the step as the ODE dictates, hoping that such a step won't lead
    # to a problematic region. For complex ODEs it makes sense to use the real
    # part of f, as we take steps along the real axis.
    f_sign = 2 * (np.real(f) >= 0).astype(float) - 1
    y_scale = f_sign * np.maximum(threshold, np.abs(y))
    h = (y + factor * y_scale) - y

    # Make sure that the step is not 0 to start with. This loop is unlikely
    # to be executed often.
    for i in np.nonzero(h == 0)[0]:
        while h[i] == 0:
            factor[i] *= 10
            h[i] = (y[i] + factor[i] * y_scale[i]) - y[i]

    if sparsity is None:
        return _dense_num_jac(fun, t, y, f, h, factor, y_scale)
    else:
        structure, groups = sparsity
        return _sparse_num_jac(fun, t, y, f, h, factor, y_scale,
                               structure, groups)
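

# Dense-path sketch: for f(t, y) = (y0**2, y0*y1) the exact Jacobian at
# y = (1, 2) is [[2, 0], [2, 1]]. Note that `fun` must be vectorized over
# columns of `y`; pass factor=None on the first call and feed the returned
# factor back into later calls:
#
#   >>> fun = lambda t, y: np.array([y[0] ** 2, y[0] * y[1]])
#   >>> y = np.array([1.0, 2.0])
#   >>> J, factor = num_jac(fun, 0.0, y, fun(0.0, y), 1e-8, None)
#   >>> np.allclose(J, [[2, 0], [2, 1]], atol=1e-6)
#   True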


def _dense_num_jac(fun, t, y, f, h, factor, y_scale):
    n = y.shape[0]
    h_vecs = np.diag(h)
    # Perturb all components at once: column j of h_vecs perturbs y[j] only.
    f_new = fun(t, y[:, None] + h_vecs)
    diff = f_new - f[:, None]
    # For each column, track the largest difference and the scale of f there.
    max_ind = np.argmax(np.abs(diff), axis=0)
    r = np.arange(n)
    max_diff = np.abs(diff[max_ind, r])
    scale = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

    # Columns whose differences drown in round-off are retried with a
    # 10x larger step; the retry is kept only where it improves the ratio.
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_vecs[ind, ind] = h_new
        f_new = fun(t, y[:, None] + h_vecs[:, ind])
        diff_new = f_new - f[:, None]
        max_ind = np.argmax(np.abs(diff_new), axis=0)
        r = np.arange(ind.shape[0])
        max_diff_new = np.abs(diff_new[max_ind, r])
        scale_new = np.maximum(np.abs(f[max_ind]), np.abs(f_new[max_ind, r]))

        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    diff /= h

    # Adapt the factor for the next call: grow it where differences are
    # small, shrink it where they are large, and never let it reach zero.
    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor


def _sparse_num_jac(fun, t, y, f, h, factor, y_scale, structure, groups):
    n = y.shape[0]
    n_groups = np.max(groups) + 1
    # One perturbation vector per column group: structurally orthogonal
    # columns share a single function evaluation.
    h_vecs = np.empty((n_groups, n))
    for group in range(n_groups):
        e = np.equal(group, groups)
        h_vecs[group] = h * e
    h_vecs = h_vecs.T

    f_new = fun(t, y[:, None] + h_vecs)
    df = f_new - f[:, None]

    i, j, _ = find(structure)
    diff = coo_matrix((df[i, groups[j]], (i, j)), shape=(n, n)).tocsc()
    max_ind = np.array(abs(diff).argmax(axis=0)).ravel()
    r = np.arange(n)
    max_diff = np.asarray(np.abs(diff[max_ind, r])).ravel()
    scale = np.maximum(np.abs(f[max_ind]),
                       np.abs(f_new[max_ind, groups[r]]))

    # As in the dense case, retry columns whose differences drown in
    # round-off with a 10x larger step.
    diff_too_small = max_diff < NUM_JAC_DIFF_REJECT * scale
    if np.any(diff_too_small):
        ind, = np.nonzero(diff_too_small)
        new_factor = NUM_JAC_FACTOR_INCREASE * factor[ind]
        h_new = (y[ind] + new_factor * y_scale[ind]) - y[ind]
        h_new_all = np.zeros(n)
        h_new_all[ind] = h_new

        groups_unique = np.unique(groups[ind])
        groups_map = np.empty(n_groups, dtype=int)
        h_vecs = np.empty((groups_unique.shape[0], n))
        for k, group in enumerate(groups_unique):
            e = np.equal(group, groups)
            h_vecs[k] = h_new_all * e
            groups_map[group] = k
        h_vecs = h_vecs.T

        f_new = fun(t, y[:, None] + h_vecs)
        df = f_new - f[:, None]
        i, j, _ = find(structure[:, ind])
        diff_new = coo_matrix((df[i, groups_map[groups[ind[j]]]],
                               (i, j)), shape=(n, ind.shape[0])).tocsc()

        max_ind_new = np.array(abs(diff_new).argmax(axis=0)).ravel()
        r = np.arange(ind.shape[0])
        max_diff_new = np.asarray(np.abs(diff_new[max_ind_new, r])).ravel()
        scale_new = np.maximum(
            np.abs(f[max_ind_new]),
            np.abs(f_new[max_ind_new, groups_map[groups[ind]]]))

        update = max_diff[ind] * scale_new < max_diff_new * scale[ind]
        if np.any(update):
            update, = np.nonzero(update)
            update_ind = ind[update]
            factor[update_ind] = new_factor[update]
            h[update_ind] = h_new[update]
            diff[:, update_ind] = diff_new[:, update]
            scale[update_ind] = scale_new[update]
            max_diff[update_ind] = max_diff_new[update]

    # Divide each stored difference by the step of its column.
    diff.data /= np.repeat(h, np.diff(diff.indptr))

    factor[max_diff < NUM_JAC_DIFF_SMALL * scale] *= NUM_JAC_FACTOR_INCREASE
    factor[max_diff > NUM_JAC_DIFF_BIG * scale] *= NUM_JAC_FACTOR_DECREASE
    factor = np.maximum(factor, NUM_JAC_MIN_FACTOR)

    return diff, factor
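

# Sketch of the sparse path on a tridiagonal system (assumptions: the stiff
# solvers build `groups` with group_columns from the private module
# scipy.optimize._numdiff; the linear `fun` below is illustrative, so its
# forward differences are exact up to rounding):
#
#   >>> from scipy.sparse import csc_matrix
#   >>> from scipy.optimize._numdiff import group_columns
#   >>> structure = csc_matrix(np.eye(3) + np.eye(3, k=1) + np.eye(3, k=-1))
#   >>> groups = np.asarray(group_columns(structure))
#   >>> fun = lambda t, y: np.array([-2 * y[0] + y[1],
#   ...                              y[0] - 2 * y[1] + y[2],
#   ...                              y[1] - 2 * y[2]])
#   >>> y = np.ones(3)
#   >>> J, factor = num_jac(fun, 0.0, y, fun(0.0, y), 1e-8, None,
#   ...                     sparsity=(structure, groups))
#   >>> np.allclose(J.toarray(), [[-2, 1, 0], [1, -2, 1], [0, 1, -2]])
#   True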