Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/scipy/special/_logsumexp.py: 12%

50 statements  

« prev     ^ index     » next       coverage.py v7.3.2, created at 2023-12-12 06:31 +0000

1import numpy as np 

2from scipy._lib._util import _asarray_validated 

3 

4__all__ = ["logsumexp", "softmax", "log_softmax"] 

5 

6 

def logsumexp(a, axis=None, b=None, keepdims=False, return_sign=False):
    """Compute the log of the sum of exponentials of input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.

        .. versionadded:: 0.11.0
    b : array-like, optional
        Scaling factor for exp(`a`) must be of the same shape as `a` or
        broadcastable to `a`. These values may be negative in order to
        implement subtraction.

        .. versionadded:: 0.12.0
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result
        will broadcast correctly against the original array.

        .. versionadded:: 0.15.0
    return_sign : bool, optional
        If this is set to True, the result will be a pair containing sign
        information; if False, results that are negative will be returned
        as NaN. Default is False (no sign information).

        .. versionadded:: 0.16.0

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))`` calculated in a numerically
        more stable way. If `b` is given then ``np.log(np.sum(b*np.exp(a)))``
        is returned.
    sgn : ndarray
        If return_sign is True, this will be an array of floating-point
        numbers matching res and +1, 0, or -1 depending on the sign
        of the result. If False, only one result is returned.

    See Also
    --------
    numpy.logaddexp, numpy.logaddexp2

    Notes
    -----
    NumPy has a logaddexp function which is very similar to `logsumexp`, but
    only handles two arguments. `logaddexp.reduce` is similar to this
    function, but may be less stable.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import logsumexp
    >>> a = np.arange(10)
    >>> logsumexp(a)
    9.4586297444267107
    >>> np.log(np.sum(np.exp(a)))
    9.4586297444267107

    With weights

    >>> a = np.arange(10)
    >>> b = np.arange(10, 0, -1)
    >>> logsumexp(a, b=b)
    9.9170178533034665
    >>> np.log(np.sum(b*np.exp(a)))
    9.9170178533034647

    Returning a sign flag

    >>> logsumexp([1,2],b=[1,-1],return_sign=True)
    (1.5413248546129181, -1.0)

    Notice that `logsumexp` does not directly support masked arrays. To use it
    on a masked array, convert the mask into zero weights:

    >>> a = np.ma.array([np.log(2), 2, np.log(3)],
    ...                 mask=[False, True, False])
    >>> b = (~a.mask).astype(int)
    >>> logsumexp(a.data, b=b), np.log(5)
    1.6094379124341005, 1.6094379124341005

    """
    a = _asarray_validated(a, check_finite=False)
    if b is not None:
        # broadcast_arrays returns ndarrays, so no further conversion of
        # b is needed below.
        a, b = np.broadcast_arrays(a, b)
        if np.any(b == 0):
            a = a + 0.  # promote to at least float
            # exp(-inf) == 0, so zero-weighted entries drop out of the sum
            a[b == 0] = -np.inf

    a_max = np.amax(a, axis=axis, keepdims=True)

    # A non-finite max (all -inf, or any nan/inf slice) would poison the
    # shift; substituting 0 keeps exp(a - a_max) well defined while the
    # final `out += a_max` restores the correct (possibly infinite) result.
    if a_max.ndim > 0:
        a_max[~np.isfinite(a_max)] = 0
    elif not np.isfinite(a_max):
        a_max = 0

    if b is not None:
        tmp = b * np.exp(a - a_max)
    else:
        tmp = np.exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        if return_sign:
            sgn = np.sign(s)
            s *= sgn  # /= makes more sense but we need zero -> zero
        out = np.log(s)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    out += a_max

    if return_sign:
        return out, sgn
    else:
        return out

129 

130 

def softmax(x, axis=None):
    r"""Compute the softmax function.

    Each element is transformed into its exponential divided by the sum of
    the exponentials along `axis`; for a one-dimensional array this is::

        softmax(x) = np.exp(x)/sum(np.exp(x))

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or tuple of ints, optional
        Axis to compute values along. Default is None and softmax will be
        computed over the entire array `x`.

    Returns
    -------
    s : ndarray
        An array the same shape as `x`. The result will sum to 1 along the
        specified axis.

    Notes
    -----
    The formula for the softmax function :math:`\sigma(x)` for a vector
    :math:`x = \{x_0, x_1, ..., x_{n-1}\}` is

    .. math:: \sigma(x)_j = \frac{e^{x_j}}{\sum_k e^{x_k}}

    The `softmax` function is the gradient of `logsumexp`. Shifting by the
    maximum before exponentiating avoids overflow; see [1]_ for details.

    .. versionadded:: 1.2.0

    References
    ----------
    .. [1] P. Blanchard, D.J. Higham, N.J. Higham, "Accurately computing the
       log-sum-exp and softmax functions", IMA Journal of Numerical Analysis,
       Vol.41(4), :doi:`10.1093/imanum/draa038`.

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import softmax
    >>> x = np.array([[1, 0.5, 0.2, 3],
    ...               [1, -1, 7, 3],
    ...               [2, 12, 13, 3]])
    >>> softmax(x).sum()
    1.0
    >>> softmax(x, axis=1).sum(axis=1)
    array([ 1., 1., 1.])

    """
    arr = _asarray_validated(x, check_finite=False)
    # Subtract the per-axis maximum so np.exp cannot overflow; the shift
    # cancels out of the ratio.
    shifted = arr - np.amax(arr, axis=axis, keepdims=True)
    numerator = np.exp(shifted)
    denominator = np.sum(numerator, axis=axis, keepdims=True)
    return numerator / denominator

226 

227 

def log_softmax(x, axis=None):
    r"""Compute the logarithm of the softmax function.

    In principle::

        log_softmax(x) = log(softmax(x))

    but using a more accurate implementation.

    Parameters
    ----------
    x : array_like
        Input array.
    axis : int or tuple of ints, optional
        Axis to compute values along. Default is None and softmax will be
        computed over the entire array `x`.

    Returns
    -------
    s : ndarray or scalar
        An array with the same shape as `x`. Exponential of the result will
        sum to 1 along the specified axis. If `x` is a scalar, a scalar is
        returned.

    Notes
    -----
    `log_softmax` is more accurate than ``np.log(softmax(x))`` with inputs
    that make `softmax` saturate (see examples below).

    .. versionadded:: 1.5.0

    Examples
    --------
    >>> import numpy as np
    >>> from scipy.special import log_softmax
    >>> from scipy.special import softmax
    >>> x = np.array([1000.0, 1.0])
    >>> log_softmax(x)
    array([   0., -999.])
    >>> with np.errstate(divide='ignore'):
    ...     np.log(softmax(x))
    array([  0., -inf])

    """
    arr = _asarray_validated(x, check_finite=False)

    mx = np.amax(arr, axis=axis, keepdims=True)

    # Replace a non-finite maximum with 0 so the shift below stays well
    # defined (e.g. an all -inf slice).
    if mx.ndim > 0:
        mx[~np.isfinite(mx)] = 0
    elif not np.isfinite(mx):
        mx = 0

    shifted = arr - mx
    exp_shifted = np.exp(shifted)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        log_norm = np.log(np.sum(exp_shifted, axis=axis, keepdims=True))

    # log(exp(shifted)/sum(exp(shifted))) == shifted - log(sum(...))
    return shifted - log_norm