# Source listing: /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/scipy/optimize/_trustregion_constr/canonical_constraint.py

import numpy as np
import scipy.sparse as sps


class CanonicalConstraint:
    """Canonical constraint to use with trust-constr algorithm.

    It represents the set of constraints of the form::

        f_eq(x) = 0
        f_ineq(x) <= 0

    where ``f_eq`` and ``f_ineq`` are evaluated by a single function, see
    below.

    The class is supposed to be instantiated by factory methods, which
    should prepare the parameters listed below.

    Parameters
    ----------
    n_eq, n_ineq : int
        Number of equality and inequality constraints respectively.
    fun : callable
        Function defining the constraints. The signature is
        ``fun(x) -> c_eq, c_ineq``, where ``c_eq`` is ndarray with `n_eq`
        components and ``c_ineq`` is ndarray with `n_ineq` components.
    jac : callable
        Function to evaluate the Jacobian of the constraint. The signature
        is ``jac(x) -> J_eq, J_ineq``, where ``J_eq`` and ``J_ineq`` are
        either ndarray or csr_matrix of shapes (n_eq, n) and (n_ineq, n),
        respectively.
    hess : callable
        Function to evaluate the Hessian of the constraints multiplied
        by Lagrange multipliers, that is
        ``dot(f_eq, v_eq) + dot(f_ineq, v_ineq)``. The signature is
        ``hess(x, v_eq, v_ineq) -> H``, where ``H`` has an implied
        shape (n, n) and provides a matrix-vector product operation
        ``H.dot(p)``.
    keep_feasible : ndarray, shape (n_ineq,)
        Mask indicating which inequality constraints should be kept feasible.
    """
    def __init__(self, n_eq, n_ineq, fun, jac, hess, keep_feasible):
        self.n_eq = n_eq
        self.n_ineq = n_ineq
        self.fun = fun
        self.jac = jac
        self.hess = hess
        self.keep_feasible = keep_feasible
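
    # Illustrative sketch (editor addition, not part of the original module):
    # a hand-built instance for the single equality constraint
    # x[0] + x[1] - 1 = 0 in R^2, following the signatures documented above.
    #
    #     def _fun(x):
    #         return np.array([x[0] + x[1] - 1.0]), np.empty(0)
    #
    #     def _jac(x):
    #         return np.array([[1.0, 1.0]]), np.empty((0, 2))
    #
    #     def _hess(x, v_eq, v_ineq):
    #         return sps.csr_matrix((2, 2))  # the constraint is linear
    #
    #     c = CanonicalConstraint(1, 0, _fun, _jac, _hess,
    #                             np.empty(0, dtype=bool))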

    @classmethod
    def from_PreparedConstraint(cls, constraint):
        """Create an instance from `PreparedConstraint` object."""
        lb, ub = constraint.bounds
        cfun = constraint.fun
        keep_feasible = constraint.keep_feasible

        if np.all(lb == -np.inf) and np.all(ub == np.inf):
            return cls.empty(cfun.n)

        if np.all(lb == ub):
            return cls._equal_to_canonical(cfun, lb)
        elif np.all(lb == -np.inf):
            return cls._less_to_canonical(cfun, ub, keep_feasible)
        elif np.all(ub == np.inf):
            return cls._greater_to_canonical(cfun, lb, keep_feasible)
        else:
            return cls._interval_to_canonical(cfun, lb, ub, keep_feasible)
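
    # Illustrative note (editor addition): examples of bound patterns and the
    # factory each one dispatches to, assuming two-component bounds.
    #
    #     lb = [1, 2],        ub = [1, 2]        -> _equal_to_canonical
    #     lb = [-inf, -inf],  ub = [1, inf]      -> _less_to_canonical
    #     lb = [0, -inf],     ub = [inf, inf]    -> _greater_to_canonical
    #     lb = [0, 1],        ub = [2, inf]      -> _interval_to_canonical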

    @classmethod
    def empty(cls, n):
        """Create an "empty" instance.

        This "empty" instance is required to allow working with unconstrained
        problems as if they have some constraints.
        """
        empty_fun = np.empty(0)
        empty_jac = np.empty((0, n))
        empty_hess = sps.csr_matrix((n, n))

        def fun(x):
            return empty_fun, empty_fun

        def jac(x):
            return empty_jac, empty_jac

        def hess(x, v_eq, v_ineq):
            return empty_hess

        return cls(0, 0, fun, jac, hess, np.empty(0, dtype=np.bool_))

    @classmethod
    def concatenate(cls, canonical_constraints, sparse_jacobian):
        """Concatenate multiple `CanonicalConstraint` into one.

        `sparse_jacobian` (bool) determines the Jacobian format of the
        concatenated constraint. Note that items in `canonical_constraints`
        must have their Jacobians in the same format.
        """
        def fun(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                        *[c.fun(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return np.hstack(eq_all), np.hstack(ineq_all)

        if sparse_jacobian:
            vstack = sps.vstack
        else:
            vstack = np.vstack

        def jac(x):
            if canonical_constraints:
                eq_all, ineq_all = zip(
                        *[c.jac(x) for c in canonical_constraints])
            else:
                eq_all, ineq_all = [], []

            return vstack(eq_all), vstack(ineq_all)

        def hess(x, v_eq, v_ineq):
            hess_all = []
            index_eq = 0
            index_ineq = 0
            for c in canonical_constraints:
                vc_eq = v_eq[index_eq:index_eq + c.n_eq]
                vc_ineq = v_ineq[index_ineq:index_ineq + c.n_ineq]
                hess_all.append(c.hess(x, vc_eq, vc_ineq))
                index_eq += c.n_eq
                index_ineq += c.n_ineq

            def matvec(p):
                result = np.zeros_like(p)
                for h in hess_all:
                    result += h.dot(p)
                return result

            n = x.shape[0]
            return sps.linalg.LinearOperator((n, n), matvec, dtype=float)

        n_eq = sum(c.n_eq for c in canonical_constraints)
        n_ineq = sum(c.n_ineq for c in canonical_constraints)
        keep_feasible = np.hstack([c.keep_feasible for c in
                                   canonical_constraints])

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
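
    # Illustrative sketch (editor addition): concatenating two existing
    # CanonicalConstraint instances `c1` and `c2` over the same variable x
    # (hypothetical names), with dense Jacobians.
    #
    #     combined = CanonicalConstraint.concatenate([c1, c2],
    #                                                sparse_jacobian=False)
    #     c_eq, c_ineq = combined.fun(x)      # values stacked in list order
    #     J_eq, J_ineq = combined.jac(x)      # np.vstack of the per-item Jacobians
    #     H = combined.hess(x, v_eq, v_ineq)  # LinearOperator; apply with H.dot(p)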

    @classmethod
    def _equal_to_canonical(cls, cfun, value):
        empty_fun = np.empty(0)
        n = cfun.n

        n_eq = value.shape[0]
        n_ineq = 0
        keep_feasible = np.empty(0, dtype=bool)

        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        def fun(x):
            return cfun.fun(x) - value, empty_fun

        def jac(x):
            return cfun.jac(x), empty_jac

        def hess(x, v_eq, v_ineq):
            return cfun.hess(x, v_eq)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _less_to_canonical(cls, cfun, ub, keep_feasible):
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_ub = ub < np.inf
        n_eq = 0
        n_ineq = np.sum(finite_ub)

        if np.all(finite_ub):
            def fun(x):
                return empty_fun, cfun.fun(x) - ub

            def jac(x):
                return empty_jac, cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, v_ineq)
        else:
            finite_ub = np.nonzero(finite_ub)[0]
            keep_feasible = keep_feasible[finite_ub]
            ub = ub[finite_ub]

            def fun(x):
                return empty_fun, cfun.fun(x)[finite_ub] - ub

            def jac(x):
                return empty_jac, cfun.jac(x)[finite_ub]

            def hess(x, v_eq, v_ineq):
                v = np.zeros(cfun.m)
                v[finite_ub] = v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
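
    # Illustrative note (editor addition): with ub = [1.0, inf], only the
    # finite component is kept, so n_ineq == 1 and the canonical inequality
    # is cfun.fun(x)[0] - 1.0 <= 0. In the hess closure, the short v_ineq is
    # scattered back into a multiplier vector of length cfun.m before
    # calling cfun.hess.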

    @classmethod
    def _greater_to_canonical(cls, cfun, lb, keep_feasible):
        empty_fun = np.empty(0)
        n = cfun.n
        if cfun.sparse_jacobian:
            empty_jac = sps.csr_matrix((0, n))
        else:
            empty_jac = np.empty((0, n))

        finite_lb = lb > -np.inf
        n_eq = 0
        n_ineq = np.sum(finite_lb)

        if np.all(finite_lb):
            def fun(x):
                return empty_fun, lb - cfun.fun(x)

            def jac(x):
                return empty_jac, -cfun.jac(x)

            def hess(x, v_eq, v_ineq):
                return cfun.hess(x, -v_ineq)
        else:
            finite_lb = np.nonzero(finite_lb)[0]
            keep_feasible = keep_feasible[finite_lb]
            lb = lb[finite_lb]

            def fun(x):
                return empty_fun, lb - cfun.fun(x)[finite_lb]

            def jac(x):
                return empty_jac, -cfun.jac(x)[finite_lb]

            def hess(x, v_eq, v_ineq):
                v = np.zeros(cfun.m)
                v[finite_lb] = -v_ineq
                return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)

    @classmethod
    def _interval_to_canonical(cls, cfun, lb, ub, keep_feasible):
        lb_inf = lb == -np.inf
        ub_inf = ub == np.inf
        equal = lb == ub
        less = lb_inf & ~ub_inf
        greater = ub_inf & ~lb_inf
        interval = ~equal & ~lb_inf & ~ub_inf

        equal = np.nonzero(equal)[0]
        less = np.nonzero(less)[0]
        greater = np.nonzero(greater)[0]
        interval = np.nonzero(interval)[0]
        n_less = less.shape[0]
        n_greater = greater.shape[0]
        n_interval = interval.shape[0]
        n_ineq = n_less + n_greater + 2 * n_interval
        n_eq = equal.shape[0]

        keep_feasible = np.hstack((keep_feasible[less],
                                   keep_feasible[greater],
                                   keep_feasible[interval],
                                   keep_feasible[interval]))

        def fun(x):
            f = cfun.fun(x)
            eq = f[equal] - lb[equal]
            le = f[less] - ub[less]
            ge = lb[greater] - f[greater]
            il = f[interval] - ub[interval]
            ig = lb[interval] - f[interval]
            return eq, np.hstack((le, ge, il, ig))

        def jac(x):
            J = cfun.jac(x)
            eq = J[equal]
            le = J[less]
            ge = -J[greater]
            il = J[interval]
            ig = -il
            if sps.issparse(J):
                ineq = sps.vstack((le, ge, il, ig))
            else:
                ineq = np.vstack((le, ge, il, ig))
            return eq, ineq

        def hess(x, v_eq, v_ineq):
            n_start = 0
            v_l = v_ineq[n_start:n_start + n_less]
            n_start += n_less
            v_g = v_ineq[n_start:n_start + n_greater]
            n_start += n_greater
            v_il = v_ineq[n_start:n_start + n_interval]
            n_start += n_interval
            v_ig = v_ineq[n_start:n_start + n_interval]

            v = np.zeros_like(lb)
            v[equal] = v_eq
            v[less] = v_l
            v[greater] = -v_g
            v[interval] = v_il - v_ig

            return cfun.hess(x, v)

        return cls(n_eq, n_ineq, fun, jac, hess, keep_feasible)
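
# Illustrative note (editor addition): for a component with finite bounds on
# both sides, e.g. lb = 0 and ub = 1, _interval_to_canonical emits two
# inequalities, stacked after the one-sided ones in the order
# (less, greater, interval-upper, interval-lower):
#
#     f(x) - 1 <= 0    # upper side
#     0 - f(x) <= 0    # lower side
#
# which is why n_ineq = n_less + n_greater + 2 * n_interval and keep_feasible
# repeats the interval entries once per side.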


def initial_constraints_as_canonical(n, prepared_constraints, sparse_jacobian):
    """Convert initial values of the constraints to the canonical format.

    The purpose is to avoid one additional call to the constraints at the
    initial point. It takes the values saved in `PreparedConstraint`,
    modifies and concatenates them into the canonical constraint format.
    """
    c_eq = []
    c_ineq = []
    J_eq = []
    J_ineq = []

    for c in prepared_constraints:
        f = c.fun.f
        J = c.fun.J
        lb, ub = c.bounds
        if np.all(lb == ub):
            c_eq.append(f - lb)
            J_eq.append(J)
        elif np.all(lb == -np.inf):
            finite_ub = ub < np.inf
            c_ineq.append(f[finite_ub] - ub[finite_ub])
            J_ineq.append(J[finite_ub])
        elif np.all(ub == np.inf):
            finite_lb = lb > -np.inf
            c_ineq.append(lb[finite_lb] - f[finite_lb])
            J_ineq.append(-J[finite_lb])
        else:
            lb_inf = lb == -np.inf
            ub_inf = ub == np.inf
            equal = lb == ub
            less = lb_inf & ~ub_inf
            greater = ub_inf & ~lb_inf
            interval = ~equal & ~lb_inf & ~ub_inf

            c_eq.append(f[equal] - lb[equal])
            c_ineq.append(f[less] - ub[less])
            c_ineq.append(lb[greater] - f[greater])
            c_ineq.append(f[interval] - ub[interval])
            c_ineq.append(lb[interval] - f[interval])

            J_eq.append(J[equal])
            J_ineq.append(J[less])
            J_ineq.append(-J[greater])
            J_ineq.append(J[interval])
            J_ineq.append(-J[interval])

    c_eq = np.hstack(c_eq) if c_eq else np.empty(0)
    c_ineq = np.hstack(c_ineq) if c_ineq else np.empty(0)

    if sparse_jacobian:
        vstack = sps.vstack
        empty = sps.csr_matrix((0, n))
    else:
        vstack = np.vstack
        empty = np.empty((0, n))

    J_eq = vstack(J_eq) if J_eq else empty
    J_ineq = vstack(J_ineq) if J_ineq else empty

    return c_eq, c_ineq, J_eq, J_ineq
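

if __name__ == "__main__":
    # Illustrative usage sketch (editor addition, not part of the original
    # module). It assumes the NonlinearConstraint and PreparedConstraint
    # helpers from scipy.optimize; the private import path below may differ
    # between SciPy versions.
    from scipy.optimize import NonlinearConstraint
    from scipy.optimize._constraints import PreparedConstraint

    x0 = np.array([0.5, 0.5])
    # Single inequality constraint x[0]**2 + x[1]**2 <= 1.
    nlc = NonlinearConstraint(lambda x: x[0] ** 2 + x[1] ** 2, -np.inf, 1.0)
    prepared = PreparedConstraint(nlc, x0)
    canonical = CanonicalConstraint.from_PreparedConstraint(prepared)

    c_eq, c_ineq = canonical.fun(x0)
    print("n_eq =", canonical.n_eq, "n_ineq =", canonical.n_ineq)
    print("c_eq =", c_eq)      # expected: empty array
    print("c_ineq =", c_ineq)  # expected: [-0.5], i.e. 0.5 - 1.0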