
import time
import numpy as np
from scipy.sparse.linalg import LinearOperator
from .._differentiable_functions import VectorFunction
from .._constraints import (
    NonlinearConstraint, LinearConstraint, PreparedConstraint, strict_bounds)
from .._hessian_update_strategy import BFGS
from .._optimize import OptimizeResult
from .._differentiable_functions import ScalarFunction
from .equality_constrained_sqp import equality_constrained_sqp
from .canonical_constraint import (CanonicalConstraint,
                                   initial_constraints_as_canonical)
from .tr_interior_point import tr_interior_point
from .report import BasicReport, SQPReport, IPReport


TERMINATION_MESSAGES = {
    0: "The maximum number of function evaluations is exceeded.",
    1: "`gtol` termination condition is satisfied.",
    2: "`xtol` termination condition is satisfied.",
    3: "`callback` function requested termination."
}


class HessianLinearOperator:
    """Build LinearOperator from hessp"""
    def __init__(self, hessp, n):
        self.hessp = hessp
        self.n = n

    def __call__(self, x, *args):
        def matvec(p):
            return self.hessp(x, p, *args)

        return LinearOperator((self.n, self.n), matvec=matvec)
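# Illustrative sketch (not part of the solver): for a fixed x, the wrapper
# above turns a user-supplied Hessian-vector product ``hessp(x, p)`` into a
# LinearOperator whose ``dot`` applies the Hessian to a vector. Assuming the
# toy objective f(x) = ||x||^2, so that hessp(x, p) = 2*p:
#
#     hessp = lambda x, p: 2.0 * p
#     H = HessianLinearOperator(hessp, n=3)
#     H(np.zeros(3)).dot(np.array([1.0, 2.0, 3.0]))   # -> array([2., 4., 6.])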


class LagrangianHessian:
    """The Hessian of the Lagrangian as a LinearOperator.

    The Lagrangian is computed as the objective function plus all the
    constraints, each multiplied by its Lagrange multiplier.
    """
    def __init__(self, n, objective_hess, constraints_hess):
        self.n = n
        self.objective_hess = objective_hess
        self.constraints_hess = constraints_hess

    def __call__(self, x, v_eq=np.empty(0), v_ineq=np.empty(0)):
        H_objective = self.objective_hess(x)
        H_constraints = self.constraints_hess(x, v_eq, v_ineq)

        def matvec(p):
            return H_objective.dot(p) + H_constraints.dot(p)

        return LinearOperator((self.n, self.n), matvec)
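# Informal note: for multipliers (v_eq, v_ineq), the operator returned above
# computes p -> H_objective @ p + H_constraints @ p, i.e. the action of
# grad^2 f(x) + sum_i v_i * grad^2 c_i(x) on p, without ever assembling the
# Lagrangian Hessian as an explicit matrix.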


def update_state_sqp(state, x, last_iteration_failed, objective, prepared_constraints,
                     start_time, tr_radius, constr_penalty, cg_info):
    state.nit += 1
    state.nfev = objective.nfev
    state.njev = objective.ngev
    state.nhev = objective.nhev
    state.constr_nfev = [c.fun.nfev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_njev = [c.fun.njev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]
    state.constr_nhev = [c.fun.nhev if isinstance(c.fun, VectorFunction) else 0
                         for c in prepared_constraints]

    if not last_iteration_failed:
        state.x = x
        state.fun = objective.f
        state.grad = objective.g
        state.v = [c.fun.v for c in prepared_constraints]
        state.constr = [c.fun.f for c in prepared_constraints]
        state.jac = [c.fun.J for c in prepared_constraints]
        # Compute Lagrangian Gradient
        state.lagrangian_grad = np.copy(state.grad)
        for c in prepared_constraints:
            state.lagrangian_grad += c.fun.J.T.dot(c.fun.v)
        state.optimality = np.linalg.norm(state.lagrangian_grad, np.inf)
        # Compute maximum constraint violation
        state.constr_violation = 0
        for i in range(len(prepared_constraints)):
            lb, ub = prepared_constraints[i].bounds
            c = state.constr[i]
            state.constr_violation = np.max([state.constr_violation,
                                             np.max(lb - c),
                                             np.max(c - ub)])

    state.execution_time = time.time() - start_time
    state.tr_radius = tr_radius
    state.constr_penalty = constr_penalty
    state.cg_niter += cg_info["niter"]
    state.cg_stop_cond = cg_info["stop_cond"]

    return state
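# Informal example with hypothetical numbers: a prepared constraint with
# bounds lb = [0, 0], ub = [1, 1] evaluated at c = [-0.5, 2.0] gives
# np.max(lb - c) = 0.5 and np.max(c - ub) = 1.0, so it raises
# ``state.constr_violation`` to 1.0 (the largest elementwise bound violation).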


def update_state_ip(state, x, last_iteration_failed, objective,
                    prepared_constraints, start_time,
                    tr_radius, constr_penalty, cg_info,
                    barrier_parameter, barrier_tolerance):
    state = update_state_sqp(state, x, last_iteration_failed, objective,
                             prepared_constraints, start_time, tr_radius,
                             constr_penalty, cg_info)
    state.barrier_parameter = barrier_parameter
    state.barrier_tolerance = barrier_tolerance
    return state


def _minimize_trustregion_constr(fun, x0, args, grad,
                                 hess, hessp, bounds, constraints,
                                 xtol=1e-8, gtol=1e-8,
                                 barrier_tol=1e-8,
                                 sparse_jacobian=None,
                                 callback=None, maxiter=1000,
                                 verbose=0, finite_diff_rel_step=None,
                                 initial_constr_penalty=1.0, initial_tr_radius=1.0,
                                 initial_barrier_parameter=0.1,
                                 initial_barrier_tolerance=0.1,
                                 factorization_method=None,
                                 disp=False):

    """Minimize a scalar function subject to constraints.

    Parameters
    ----------
    gtol : float, optional
        Tolerance for termination by the norm of the Lagrangian gradient.
        The algorithm will terminate when both the infinity norm (i.e., max
        abs value) of the Lagrangian gradient and the constraint violation
        are smaller than ``gtol``. Default is 1e-8.
    xtol : float, optional
        Tolerance for termination by the change of the independent variable.
        The algorithm will terminate when ``tr_radius < xtol``, where
        ``tr_radius`` is the radius of the trust region used in the algorithm.
        Default is 1e-8.
    barrier_tol : float, optional
        Threshold on the barrier parameter for the algorithm termination.
        When inequality constraints are present, the algorithm will terminate
        only when the barrier parameter is less than `barrier_tol`.
        Default is 1e-8.
    sparse_jacobian : {bool, None}, optional
        Determines how to represent Jacobians of the constraints. If bool,
        then Jacobians of all the constraints will be converted to the
        corresponding format. If None (default), then Jacobians won't be
        converted, but the algorithm can proceed only if they all have the
        same format.

    initial_tr_radius : float, optional
        Initial trust radius. The trust radius gives the maximum distance
        between solution points in consecutive iterations. It reflects the
        trust the algorithm puts in the local approximation of the
        optimization problem. For an accurate local approximation the trust
        region should be large; for an approximation valid only close to the
        current point it should be small. The trust radius is automatically
        updated throughout the optimization process, with
        ``initial_tr_radius`` being its initial value.
        Default is 1 (recommended in [1]_, p. 19).
    initial_constr_penalty : float, optional
        Initial constraints penalty parameter. The penalty parameter is used
        for balancing the requirements of decreasing the objective function
        and satisfying the constraints. It is used for defining the merit
        function:
        ``merit_function(x) = fun(x) + constr_penalty * constr_norm_l2(x)``,
        where ``constr_norm_l2(x)`` is the l2 norm of a vector containing all
        the constraints. The merit function is used for accepting or rejecting
        trial points and ``constr_penalty`` weights the two conflicting goals
        of reducing the objective function and satisfying the constraints.
        The penalty is automatically updated throughout the optimization
        process, with ``initial_constr_penalty`` being its initial value.
        Default is 1 (recommended in [1]_, p. 19).

    initial_barrier_parameter, initial_barrier_tolerance : float, optional
        Initial barrier parameter and initial tolerance for the barrier
        subproblem. Both are used only when inequality constraints are
        present. For dealing with optimization problems ``min_x f(x)`` subject
        to inequality constraints ``c(x) <= 0`` the algorithm introduces slack
        variables, solving the problem
        ``min_(x,s) f(x) - barrier_parameter*sum(ln(s))`` subject to the
        equality constraints ``c(x) + s = 0`` instead of the original problem.
        This subproblem is solved for decreasing values of
        ``barrier_parameter`` and with decreasing tolerances for the
        termination, starting with ``initial_barrier_parameter`` for the
        barrier parameter and ``initial_barrier_tolerance`` for the barrier
        tolerance. Default is 0.1 for both values (recommended in [1]_,
        p. 19). Also note that ``barrier_parameter`` and ``barrier_tolerance``
        are updated with the same prefactor.

    factorization_method : string or None, optional
        Method to factorize the Jacobian of the constraints. Use None
        (default) for the auto selection or one of:

        - 'NormalEquation' (requires scikit-sparse)
        - 'AugmentedSystem'
        - 'QRFactorization'
        - 'SVDFactorization'

        The methods 'NormalEquation' and 'AugmentedSystem' can be used only
        with sparse constraints. The projections required by the algorithm
        will be computed using, respectively, the normal equation and the
        augmented system approaches explained in [1]_. 'NormalEquation'
        computes the Cholesky factorization of ``A A.T`` and 'AugmentedSystem'
        performs the LU factorization of an augmented system. They usually
        provide similar results. 'AugmentedSystem' is used by default for
        sparse matrices.

        The methods 'QRFactorization' and 'SVDFactorization' can be used
        only with dense constraints. They compute the required projections
        using, respectively, QR and SVD factorizations. The 'SVDFactorization'
        method can cope with Jacobian matrices with deficient row rank and
        will be used whenever other factorization methods fail (which may
        imply the conversion of sparse matrices to a dense format when
        required). By default, 'QRFactorization' is used for dense matrices.

    finite_diff_rel_step : None or array_like, optional
        Relative step size for the finite difference approximation.
    maxiter : int, optional
        Maximum number of algorithm iterations. Default is 1000.
    verbose : {0, 1, 2, 3}, optional
        Level of algorithm's verbosity:

        * 0 (default) : work silently.
        * 1 : display a termination report.
        * 2 : display progress during iterations.
        * 3 : display progress during iterations (more complete report).

    disp : bool, optional
        If True, then `verbose` will be set to 1 if it was 0.
        Default is False.

    Returns
    -------
    `OptimizeResult` with the fields documented below. Note the following:

    1. All values corresponding to the constraints are ordered as they
       were passed to the solver. And values corresponding to `bounds`
       constraints are put *after* other constraints.
    2. All numbers of function, Jacobian or Hessian evaluations correspond
       to numbers of actual Python function calls. It means, for example,
       that if a Jacobian is estimated by finite differences, then the
       number of Jacobian evaluations will be zero and the number of
       function evaluations will be incremented by all calls during the
       finite difference estimation.

    x : ndarray, shape (n,)
        Solution found.
    optimality : float
        Infinity norm of the Lagrangian gradient at the solution.
    constr_violation : float
        Maximum constraint violation at the solution.
    fun : float
        Objective function at the solution.
    grad : ndarray, shape (n,)
        Gradient of the objective function at the solution.
    lagrangian_grad : ndarray, shape (n,)
        Gradient of the Lagrangian function at the solution.
    nit : int
        Total number of iterations.
    nfev : int
        Number of the objective function evaluations.
    njev : int
        Number of the objective function gradient evaluations.
    nhev : int
        Number of the objective function Hessian evaluations.
    cg_niter : int
        Total number of the conjugate gradient method iterations.
    method : {'equality_constrained_sqp', 'tr_interior_point'}
        Optimization method used.
    constr : list of ndarray
        List of constraint values at the solution.
    jac : list of {ndarray, sparse matrix}
        List of the Jacobian matrices of the constraints at the solution.
    v : list of ndarray
        List of the Lagrange multipliers for the constraints at the solution.
        For an inequality constraint a positive multiplier means that the
        upper bound is active, a negative multiplier means that the lower
        bound is active and if a multiplier is zero it means the constraint
        is not active.
    constr_nfev : list of int
        Number of constraint evaluations for each of the constraints.
    constr_njev : list of int
        Number of Jacobian matrix evaluations for each of the constraints.
    constr_nhev : list of int
        Number of Hessian evaluations for each of the constraints.
    tr_radius : float
        Radius of the trust region at the last iteration.
    constr_penalty : float
        Penalty parameter at the last iteration, see `initial_constr_penalty`.
    barrier_tolerance : float
        Tolerance for the barrier subproblem at the last iteration.
        Only for problems with inequality constraints.
    barrier_parameter : float
        Barrier parameter at the last iteration. Only for problems
        with inequality constraints.
    execution_time : float
        Total execution time.
    message : str
        Termination message.
    status : {0, 1, 2, 3}
        Termination status:

        * 0 : The maximum number of function evaluations is exceeded.
        * 1 : `gtol` termination condition is satisfied.
        * 2 : `xtol` termination condition is satisfied.
        * 3 : `callback` function requested termination.

    cg_stop_cond : int
        Reason for CG subproblem termination at the last iteration:

        * 0 : CG subproblem not evaluated.
        * 1 : Iteration limit was reached.
        * 2 : Reached the trust-region boundary.
        * 3 : Negative curvature detected.
        * 4 : Tolerance was satisfied.

    References
    ----------
    .. [1] Conn, A. R., Gould, N. I., & Toint, P. L.
           Trust Region Methods. SIAM, 2000, p. 19.
    """
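    # Illustrative usage sketch (hypothetical values, not executed here):
    # this solver is normally reached through ``scipy.optimize.minimize``
    # with ``method='trust-constr'``, e.g.
    #
    #     from scipy.optimize import minimize, NonlinearConstraint, rosen
    #     con = NonlinearConstraint(lambda z: z[0]**2 + z[1]**2, -np.inf, 1.0)
    #     res = minimize(rosen, [0.5, 0.5], method='trust-constr',
    #                    constraints=[con], options={'verbose': 1})
    #
    # With inequality constraints present, the interior-point branch below is
    # taken; with only equality constraints, the SQP branch is used.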

    x0 = np.atleast_1d(x0).astype(float)
    n_vars = np.size(x0)
    if hess is None:
        if callable(hessp):
            hess = HessianLinearOperator(hessp, n_vars)
        else:
            hess = BFGS()
    if disp and verbose == 0:
        verbose = 1

    if bounds is not None:
        finite_diff_bounds = strict_bounds(bounds.lb, bounds.ub,
                                           bounds.keep_feasible, n_vars)
    else:
        finite_diff_bounds = (-np.inf, np.inf)

    # Define Objective Function
    objective = ScalarFunction(fun, x0, args, grad, hess,
                               finite_diff_rel_step, finite_diff_bounds)

    # Put constraints in list format when needed.
    if isinstance(constraints, (NonlinearConstraint, LinearConstraint)):
        constraints = [constraints]

    # Prepare constraints.
    prepared_constraints = [
        PreparedConstraint(c, x0, sparse_jacobian, finite_diff_bounds)
        for c in constraints]

    # Check that all constraints are either sparse or dense.
    n_sparse = sum(c.fun.sparse_jacobian for c in prepared_constraints)
    if 0 < n_sparse < len(prepared_constraints):
        raise ValueError("All constraints must have the same kind of "
                         "Jacobian --- either all sparse or all dense. "
                         "You can set the sparsity globally by setting "
                         "`sparse_jacobian` to either True or False.")
    if prepared_constraints:
        sparse_jacobian = n_sparse > 0

    if bounds is not None:
        if sparse_jacobian is None:
            sparse_jacobian = True
        prepared_constraints.append(PreparedConstraint(bounds, x0,
                                                       sparse_jacobian))

    # Concatenate initial constraints to the canonical form.
    c_eq0, c_ineq0, J_eq0, J_ineq0 = initial_constraints_as_canonical(
        n_vars, prepared_constraints, sparse_jacobian)
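    # Informal note: a canonical constraint collects everything in the form
    # c_eq(x) = 0, c_ineq(x) <= 0. For instance, a two-sided constraint
    # lb <= c(x) <= ub contributes equality components where lb == ub and
    # inequality components lb - c(x) <= 0 and c(x) - ub <= 0 elsewhere
    # (see canonical_constraint.py for the exact transformation).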

    # Prepare all canonical constraints and concatenate them into one.
    canonical_all = [CanonicalConstraint.from_PreparedConstraint(c)
                     for c in prepared_constraints]

    if len(canonical_all) == 0:
        canonical = CanonicalConstraint.empty(n_vars)
    elif len(canonical_all) == 1:
        canonical = canonical_all[0]
    else:
        canonical = CanonicalConstraint.concatenate(canonical_all,
                                                    sparse_jacobian)

    # Generate the Hessian of the Lagrangian.
    lagrangian_hess = LagrangianHessian(n_vars, objective.hess, canonical.hess)

    # Choose the appropriate method.
    if canonical.n_ineq == 0:
        method = 'equality_constrained_sqp'
    else:
        method = 'tr_interior_point'

    # Construct OptimizeResult.
    state = OptimizeResult(
        nit=0, nfev=0, njev=0, nhev=0,
        cg_niter=0, cg_stop_cond=0,
        fun=objective.f, grad=objective.g,
        lagrangian_grad=np.copy(objective.g),
        constr=[c.fun.f for c in prepared_constraints],
        jac=[c.fun.J for c in prepared_constraints],
        constr_nfev=[0 for c in prepared_constraints],
        constr_njev=[0 for c in prepared_constraints],
        constr_nhev=[0 for c in prepared_constraints],
        v=[c.fun.v for c in prepared_constraints],
        method=method)

    # Start counting.
    start_time = time.time()

401 

402 # Define stop criteria 

403 if method == 'equality_constrained_sqp': 

404 def stop_criteria(state, x, last_iteration_failed, 

405 optimality, constr_violation, 

406 tr_radius, constr_penalty, cg_info): 

407 state = update_state_sqp(state, x, last_iteration_failed, 

408 objective, prepared_constraints, 

409 start_time, tr_radius, constr_penalty, 

410 cg_info) 

411 if verbose == 2: 

412 BasicReport.print_iteration(state.nit, 

413 state.nfev, 

414 state.cg_niter, 

415 state.fun, 

416 state.tr_radius, 

417 state.optimality, 

418 state.constr_violation) 

419 elif verbose > 2: 

420 SQPReport.print_iteration(state.nit, 

421 state.nfev, 

422 state.cg_niter, 

423 state.fun, 

424 state.tr_radius, 

425 state.optimality, 

426 state.constr_violation, 

427 state.constr_penalty, 

428 state.cg_stop_cond) 

429 state.status = None 

430 state.niter = state.nit # Alias for callback (backward-compatibility) 

431 if callback is not None and callback(np.copy(state.x), state): 

432 state.status = 3 

433 elif state.optimality < gtol and state.constr_violation < gtol: 

434 state.status = 1 

435 elif state.tr_radius < xtol: 

436 state.status = 2 

437 elif state.nit >= maxiter: 

438 state.status = 0 

439 return state.status in (0, 1, 2, 3) 

440 elif method == 'tr_interior_point': 

441 def stop_criteria(state, x, last_iteration_failed, tr_radius, 

442 constr_penalty, cg_info, barrier_parameter, 

443 barrier_tolerance): 

444 state = update_state_ip(state, x, last_iteration_failed, 

445 objective, prepared_constraints, 

446 start_time, tr_radius, constr_penalty, 

447 cg_info, barrier_parameter, barrier_tolerance) 

448 if verbose == 2: 

449 BasicReport.print_iteration(state.nit, 

450 state.nfev, 

451 state.cg_niter, 

452 state.fun, 

453 state.tr_radius, 

454 state.optimality, 

455 state.constr_violation) 

456 elif verbose > 2: 

457 IPReport.print_iteration(state.nit, 

458 state.nfev, 

459 state.cg_niter, 

460 state.fun, 

461 state.tr_radius, 

462 state.optimality, 

463 state.constr_violation, 

464 state.constr_penalty, 

465 state.barrier_parameter, 

466 state.cg_stop_cond) 

467 state.status = None 

468 state.niter = state.nit # Alias for callback (backward compatibility) 

469 if callback is not None and callback(np.copy(state.x), state): 

470 state.status = 3 

471 elif state.optimality < gtol and state.constr_violation < gtol: 

472 state.status = 1 

473 elif (state.tr_radius < xtol 

474 and state.barrier_parameter < barrier_tol): 

475 state.status = 2 

476 elif state.nit >= maxiter: 

477 state.status = 0 

478 return state.status in (0, 1, 2, 3) 

479 

480 if verbose == 2: 

481 BasicReport.print_header() 

482 elif verbose > 2: 

483 if method == 'equality_constrained_sqp': 

484 SQPReport.print_header() 

485 elif method == 'tr_interior_point': 

486 IPReport.print_header() 

    # Call the lower-level solver to do the optimization.
    if method == 'equality_constrained_sqp':
        def fun_and_constr(x):
            f = objective.fun(x)
            c_eq, _ = canonical.fun(x)
            return f, c_eq

        def grad_and_jac(x):
            g = objective.grad(x)
            J_eq, _ = canonical.jac(x)
            return g, J_eq

        _, result = equality_constrained_sqp(
            fun_and_constr, grad_and_jac, lagrangian_hess,
            x0, objective.f, objective.g,
            c_eq0, J_eq0,
            stop_criteria, state,
            initial_constr_penalty, initial_tr_radius,
            factorization_method)

    elif method == 'tr_interior_point':
        _, result = tr_interior_point(
            objective.fun, objective.grad, lagrangian_hess,
            n_vars, canonical.n_ineq, canonical.n_eq,
            canonical.fun, canonical.jac,
            x0, objective.f, objective.g,
            c_ineq0, J_ineq0, c_eq0, J_eq0,
            stop_criteria,
            canonical.keep_feasible,
            xtol, state, initial_barrier_parameter,
            initial_barrier_tolerance,
            initial_constr_penalty, initial_tr_radius,
            factorization_method)

    # Status 3 occurs when the callback function requests termination;
    # this is not considered a success.
    result.success = result.status in (1, 2)
    result.message = TERMINATION_MESSAGES[result.status]

    # Alias (for backward compatibility with 1.1.0)
    result.niter = result.nit

    if verbose == 2:
        BasicReport.print_footer()
    elif verbose > 2:
        if method == 'equality_constrained_sqp':
            SQPReport.print_footer()
        elif method == 'tr_interior_point':
            IPReport.print_footer()
    if verbose >= 1:
        print(result.message)
        print("Number of iterations: {}, function evaluations: {}, "
              "CG iterations: {}, optimality: {:.2e}, "
              "constraint violation: {:.2e}, execution time: {:4.2} s."
              .format(result.nit, result.nfev, result.cg_niter,
                      result.optimality, result.constr_violation,
                      result.execution_time))
    return result