/src/quantlib/ql/math/optimization/simplex.hpp
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */

/*
 Copyright (C) 2006 Ferdinando Ametrano
 Copyright (C) 2001, 2002, 2003 Sadruddin Rejeb

 This file is part of QuantLib, a free-software/open-source library
 for financial quantitative analysts and developers - http://quantlib.org/

 QuantLib is free software: you can redistribute it and/or modify it
 under the terms of the QuantLib license. You should have received a
 copy of the license along with this program; if not, please email
 <quantlib-dev@lists.sf.net>. The license is also available online at
 <https://www.quantlib.org/license.shtml>.

 This program is distributed in the hope that it will be useful, but WITHOUT
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
 FOR A PARTICULAR PURPOSE. See the license for more details.
*/

/*! \file simplex.hpp
    \brief Simplex optimization method
*/

/* The implementation of the algorithm was inspired by
 * "Numerical Recipes in C", 2nd edition, Press, Teukolsky, Vetterling, Flannery
 * Chapter 10
 */

#ifndef quantlib_optimization_simplex_hpp
#define quantlib_optimization_simplex_hpp

#include <ql/math/optimization/problem.hpp>
#include <vector>

namespace QuantLib {

    //! Multi-dimensional simplex class
    /*! This method is fairly crude and can require many cost-function
        evaluations, but it has the advantage that it does not need
        the gradient of the cost function and is quite easy to
        implement. First, we choose N+1 starting points, given here by
        a starting point \f$ \mathbf{P}_{0} \f$ and N further points
        such that
        \f[
            \mathbf{P}_{i} = \mathbf{P}_{0} + \lambda \mathbf{e}_{i},
        \f]
        where \f$ \lambda \f$ is the problem's characteristic length
        scale and \f$ \mathbf{e}_{i} \f$ is the i-th unit vector. These
        points form a geometrical figure called a simplex. At each
        iteration, the downhill simplex method moves the worst point
        (the one with the highest cost-function value) through the
        opposite face of the simplex towards a better point. When the
        simplex appears to be stuck in a valley, it is contracted
        towards the best point, which is kept unchanged.

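        A minimal usage sketch is shown below; the cost function, the
        constraint, the initial guess and the end-criteria settings are
        illustrative only and are not provided by this header:
        \code
        // hypothetical cost function: two-dimensional Rosenbrock valley
        class RosenbrockCostFunction : public CostFunction {
          public:
            Real value(const Array& x) const override {
                Real a = 1.0 - x[0], b = x[1] - x[0]*x[0];
                return a*a + 100.0*b*b;
            }
            Array values(const Array& x) const override {
                return Array(1, value(x));
            }
        };

        RosenbrockCostFunction costFunction;
        NoConstraint constraint;
        Array initialGuess(2, 0.0);
        Problem problem(costFunction, constraint, initialGuess);

        Simplex solver(0.1);             // lambda: characteristic length scale
        EndCriteria endCriteria(1000,    // maximum iterations
                                100,     // maximum stationary-state iterations
                                1e-8,    // root epsilon
                                1e-8,    // function epsilon
                                1e-8);   // gradient-norm epsilon
        EndCriteria::Type reason = solver.minimize(problem, endCriteria);
        Array minimum = problem.currentValue();
        \endcode
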
        \ingroup optimizers
    */
    class Simplex : public OptimizationMethod {
      public:
        /*! Constructor taking as input the characteristic length */
        Simplex(Real lambda) : lambda_(lambda) {}
        EndCriteria::Type minimize(Problem& P, const EndCriteria& endCriteria) override;
        Real lambda() const { return lambda_; }

      private:
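        // attempts to improve the highest-cost vertex by extrapolating it
        // by the given factor through the face spanned by the other
        // vertices (cf. the "amoeba" routine of Numerical Recipes, ch. 10)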
        Real extrapolate(Problem& P,
                         Size iHighest,
                         Real& factor) const;
        Real lambda_;                           // characteristic length scale
        mutable std::vector<Array> vertices_;   // the N+1 simplex vertices
        mutable Array values_, sum_;            // cost at each vertex; sum of the vertices
    };

}

#endif