Coverage Report

Created: 2025-12-08 06:13

next uncovered line (L), next uncovered region (R), next uncovered branch (B)
/src/quantlib/ql/experimental/mcbasket/longstaffschwartzmultipathpricer.cpp
Line
Count
Source
1
/* -*- mode: c++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 4 -*- */
2
3
/*
4
 Copyright (C) 2009 Andrea Odetti
5
6
 This file is part of QuantLib, a free-software/open-source library
7
 for financial quantitative analysts and developers - http://quantlib.org/
8
9
 QuantLib is free software: you can redistribute it and/or modify it
10
 under the terms of the QuantLib license.  You should have received a
11
 copy of the license along with this program; if not, please email
12
 <quantlib-dev@lists.sf.net>. The license is also available online at
13
 <https://www.quantlib.org/license.shtml>.
14
15
 This program is distributed in the hope that it will be useful, but WITHOUT
16
 ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
17
 FOR A PARTICULAR PURPOSE.  See the license for more details.
18
*/
19
20
#include <ql/experimental/mcbasket/longstaffschwartzmultipathpricer.hpp>
21
#include <ql/math/generallinearleastsquares.hpp>
22
#include <ql/utilities/tracing.hpp>
23
#include <utility>
24
25
namespace QuantLib {
26
27
    // Per-path record: one payment, one exercise value and one regression
    // state vector per observation time, zero/empty initialised.
    LongstaffSchwartzMultiPathPricer::PathInfo::PathInfo(Size numberOfTimes)
    : payments(numberOfTimes, 0.0), exercises(numberOfTimes, 0.0),
      states(numberOfTimes) {}
32
33
0
    // Number of observation times recorded for this path.
    Size LongstaffSchwartzMultiPathPricer::PathInfo::pathLength() const { return states.size(); }
36
37
38
    /* Build the pricer.

       - timePositions maps each payoff observation time to its index in
         the simulated MultiPath;
       - discounts holds one discount factor per observation time;
       - the regression basis v_ is generated from the payoff's state
         dimension, the polynomial order and the polynomial family.

       coeff_ has one slot per date except the last (no regression is
       needed at the final event date); lowerBounds_ has one slot per date. */
    LongstaffSchwartzMultiPathPricer::LongstaffSchwartzMultiPathPricer(
        const ext::shared_ptr<PathPayoff>& payoff,
        const std::vector<Size>& timePositions,
        std::vector<Handle<YieldTermStructure> > forwardTermStructures,
        Array discounts,
        Size polynomialOrder,
        LsmBasisSystem::PolynomialType polynomialType)
    : payoff_(payoff), coeff_(new Array[timePositions.size() - 1]),
      lowerBounds_(new Real[timePositions.size()]), timePositions_(timePositions),
      forwardTermStructures_(std::move(forwardTermStructures)), dF_(std::move(discounts)),
      v_(LsmBasisSystem::multiPathBasisSystem(
          payoff->basisSystemDimension(), polynomialOrder, polynomialType)) {
        // only these polynomial families are supported as a basis system
        const bool supportedType =
               polynomialType == LsmBasisSystem::Monomial
            || polynomialType == LsmBasisSystem::Laguerre
            || polynomialType == LsmBasisSystem::Hermite
            || polynomialType == LsmBasisSystem::Hyperbolic
            || polynomialType == LsmBasisSystem::Chebyshev2nd;
        QL_REQUIRE(supportedType, "insufficient polynomial type");
    }
57
58
    /*
59
      Extract the relevant information from the whole path
60
     */
61
    LongstaffSchwartzMultiPathPricer::PathInfo 
62
    LongstaffSchwartzMultiPathPricer::transformPath(const MultiPath& multiPath)
63
0
    const {
64
0
        const Size numberOfAssets = multiPath.assetNumber();
65
0
        const Size numberOfTimes = timePositions_.size();
66
67
0
        Matrix path(numberOfAssets, numberOfTimes, Null<Real>());
68
69
0
        for (Size i = 0; i < numberOfTimes; ++i) {
70
0
            const Size pos = timePositions_[i];
71
0
            for (Size j = 0; j < numberOfAssets; ++j)
72
0
                path[j][i] = multiPath[j][pos];
73
0
        }
74
        
75
0
        PathInfo info(numberOfTimes);
76
77
0
        payoff_->value(path, forwardTermStructures_, info.payments, info.exercises, info.states);
78
79
0
        return info;
80
0
    }
81
82
    /* Price a single path by backward induction.

       During the calibration phase the path is only stored and the
       returned value is irrelevant.  Otherwise the path is rolled back
       from the last event date, applying at each earlier date the
       decision encoded in coeff_[i]:
         - size 0             : never exercise
         - size v_.size()     : exercise when the regressed continuation
                                value is below the exercise value (tested
                                only when exercise > lowerBounds_[i])
         - size v_.size() + 1 : always exercise
       An empty state vector at a date means exercise is not allowed
       there.  Exercise at time t cancels all payments AFTER t; the
       payment at t itself is always received.  The result is discounted
       back to today via dF_[0]. */
    Real LongstaffSchwartzMultiPathPricer::operator()(
                                            const MultiPath& multiPath) const {
        PathInfo path = transformPath(multiPath);

        if (calibrationPhase_) {
            // store paths for the calibration
            // only the relevant part
            paths_.push_back(path);
            // result doesn't matter
            return 0.0;
        }

        const Size len = path.pathLength();
        Real price = 0.0;

        // this is the last event date
        {
            const Real payoff = path.payments[len - 1];
            const Real exercise = path.exercises[len - 1];
            const Array & states = path.states[len - 1];
            const bool canExercise = !states.empty();

            // at the end the continuation value is 0.0
            if (canExercise && exercise > 0.0)
                price += exercise;
            price += payoff;
        }

        // cast before subtracting: len is unsigned, so len - 2 would wrap
        // around for len < 2 and rely on an implementation-defined
        // conversion to Integer
        for (Integer i = Integer(len) - 2; i >= 0; --i) {
            // discount the rolled-back value from t[i+1] to t[i]
            price *= dF_[i + 1] / dF_[i];

            const Real exercise = path.exercises[i];

            /*
              coeff_[i].size()
              - 0 => never exercise
              - v_.size() => use estimated continuation value 
                (if > lowerBounds_[i])
              - v_.size() + 1 => always exercise

              In any case if states is empty, no exercise is allowed.
             */
            const Array & states = path.states[i];
            const bool canExercise = !states.empty();

            if (canExercise) {
                if (coeff_[i].size() == v_.size() + 1) {   
                    // special value always exercise
                    price = exercise;
                }
                else {
                    // NOTE(review): calibrate() screens regression paths
                    // against the discounted lowerBounds_[i + 1], while here
                    // lowerBounds_[i] is used — confirm the asymmetry is
                    // intentional.
                    if (!coeff_[i].empty() && exercise > lowerBounds_[i]) {

                        Real continuationValue = 0.0;
                        for (Size l = 0; l < v_.size(); ++l) {
                            continuationValue += coeff_[i][l] * v_[l](states);
                        }

                        // exercise when continuing is worth less
                        if (continuationValue < exercise) {
                            price = exercise;
                        }
                    }
                }
            }
            // the payment at time t is received even if cancellation
            // happens at t
            const Real payoff = path.payments[i];
            price += payoff;
        }

        // discount from the first event date back to today
        return price * dF_[0];
    }
154
155
0
    void LongstaffSchwartzMultiPathPricer::calibrate() {
156
0
        const Size n = paths_.size(); // number of paths
157
0
        Array prices(n, 0.0), exercise(n, 0.0);
158
159
0
        const Size basisDimension = payoff_->basisSystemDimension();
160
161
0
        const Size len = paths_[0].pathLength();
162
163
        /*
164
          We try to estimate the lower bound of the continuation value,
165
          so that only itm paths contribute to the regression.
166
         */
167
168
0
        for (Size j = 0; j < n; ++j) {
169
0
            const Real payoff = paths_[j].payments[len - 1];
170
0
            const Real exercise = paths_[j].exercises[len - 1];
171
0
            const Array & states = paths_[j].states[len - 1];
172
0
            const bool canExercise = !states.empty();
173
174
            // at the end the continuation value is 0.0
175
0
            if (canExercise && exercise > 0.0)
176
0
                prices[j] += exercise;
177
0
            prices[j] += payoff;
178
0
        }
179
180
0
        lowerBounds_[len - 1] = *std::min_element(prices.begin(), prices.end());
181
182
0
        std::vector<bool> lsExercise(n);
183
184
0
        for (Integer i = len - 2; i >= 0; --i) {
185
0
            std::vector<Real>  y;
186
0
            std::vector<Array> x;
187
188
            // prices are discounted up to time i
189
0
            const Real discountRatio = dF_[i + 1] / dF_[i];
190
0
            prices *= discountRatio;
191
0
            lowerBounds_[i + 1] *= discountRatio;
192
193
            //roll back step
194
0
            for (Size j = 0; j < n; ++j) {
195
0
                exercise[j] = paths_[j].exercises[i];
196
197
                // If states is empty, no exercise in this path
198
                // and the path will not partecipate to the Lesat Square regression
199
200
0
                const Array & states = paths_[j].states[i];
201
0
                QL_REQUIRE(states.empty() || states.size() == basisDimension, 
202
0
                           "Invalid size of basis system");
203
204
                // only paths that could potentially create exercise opportunities
205
                // partecipate to the regression
206
207
                // if exercise is lower than minimum continuation value, no point in considering it
208
0
                if (!states.empty() && exercise[j] > lowerBounds_[i + 1]) {
209
0
                    x.push_back(states);
210
0
                    y.push_back(prices[j]);
211
0
                }
212
0
            }
213
214
0
            if (v_.size() <=  x.size()) {
215
0
                coeff_[i] = GeneralLinearLeastSquares(x, y, v_).coefficients();
216
0
            }
217
0
            else {
218
            // if number of itm paths is smaller then the number of
219
            // calibration functions -> never exercise
220
0
                QL_TRACE("Not enough itm paths: default decision is NEVER");
221
0
                coeff_[i] = Array(0);
222
0
            }
223
224
            /* attempt to avoid static arbitrage given by always or never exercising.
225
226
               always is absolute: regardless of the lowerBoundContinuationValue_ (this could be changed)
227
               but it still honours "canExercise"
228
             */
229
0
            Real sumOptimized = 0.0;
230
0
            Real sumNoExercise = 0.0;
231
0
            Real sumAlwaysExercise = 0.0; // always, if allowed
232
233
0
            for (Size j = 0, k = 0; j < n; ++j) {
234
0
                sumNoExercise += prices[j];
235
0
                lsExercise[j] = false;
236
237
0
                const bool canExercise = !paths_[j].states[i].empty();
238
0
                if (canExercise) {
239
0
                    sumAlwaysExercise += exercise[j];
240
0
                    if (!coeff_[i].empty() && exercise[j] > lowerBounds_[i + 1]) {
241
0
                        Real continuationValue = 0.0;
242
0
                        for (Size l = 0; l < v_.size(); ++l) {
243
0
                            continuationValue += coeff_[i][l] * v_[l](x[k]);
244
0
                        }
245
                        
246
0
                        if (continuationValue < exercise[j]) {
247
0
                            lsExercise[j] = true;
248
0
                        }
249
0
                        ++k;
250
0
                    }
251
0
                }
252
0
                else {
253
0
                    sumAlwaysExercise += prices[j];
254
0
                }
255
256
0
                sumOptimized += lsExercise[j] ? exercise[j] : prices[j];
257
0
            }
258
259
0
            sumOptimized /= n;
260
0
            sumNoExercise /= n;
261
0
            sumAlwaysExercise /= n;
262
263
0
            QL_TRACE(   "Time index: " << i 
264
0
                     << ", LowerBound: " << lowerBounds_[i + 1] 
265
0
                     << ", Optimum: " << sumOptimized 
266
0
                     << ", Continuation: " << sumNoExercise 
267
0
                     << ", Termination: " << sumAlwaysExercise);
268
269
0
            if (  sumOptimized >= sumNoExercise 
270
0
                && sumOptimized >= sumAlwaysExercise) {
271
                
272
0
                QL_TRACE("Accepted LS decision");
273
0
                for (Size j = 0; j < n; ++j) {
274
                    // lsExercise already contains "canExercise"
275
0
                    prices[j] = lsExercise[j] ? exercise[j] : prices[j];
276
0
                }
277
0
            }
278
0
            else if (sumAlwaysExercise > sumNoExercise) {
279
0
                QL_TRACE("Overridden bad LS decision: ALWAYS");
280
0
                for (Size j = 0; j < n; ++j) {
281
0
                    const bool canExercise = !paths_[j].states[i].empty();
282
0
                    prices[j] = canExercise ? exercise[j] : prices[j];
283
0
                }
284
                // special value to indicate always exercise
285
0
                coeff_[i] = Array(v_.size() + 1); 
286
0
            }
287
0
            else {
288
0
                QL_TRACE("Overridden bad LS decision: NEVER");
289
                // prices already contain the continuation value
290
                // special value to indicate never exercise
291
0
                coeff_[i] = Array(0); 
292
0
            }
293
294
            // then we add in any case the payment at time t
295
            // which is made even if cancellation happens at t
296
0
            for (Size j = 0; j < n; ++j) {
297
0
                const Real payoff = paths_[j].payments[i];
298
0
                prices[j] += payoff;
299
0
            }
300
301
0
            lowerBounds_[i] = *std::min_element(prices.begin(), prices.end());
302
0
        }
303
304
        // remove calibration paths
305
0
        paths_.clear();
306
        // entering the calculation phase
307
0
        calibrationPhase_ = false;
308
0
    }
309
}