/rust/registry/src/index.crates.io-1949cf8c6b5b557f/pxfm-0.1.28/src/gamma/betainc.rs
Line | Count | Source |
1 | | /* |
2 | | * // Copyright (c) Radzivon Bartoshyk 9/2025. All rights reserved. |
3 | | * // |
4 | | * // Redistribution and use in source and binary forms, with or without modification, |
5 | | * // are permitted provided that the following conditions are met: |
6 | | * // |
7 | | * // 1. Redistributions of source code must retain the above copyright notice, this |
8 | | * // list of conditions and the following disclaimer. |
9 | | * // |
10 | | * // 2. Redistributions in binary form must reproduce the above copyright notice, |
11 | | * // this list of conditions and the following disclaimer in the documentation |
12 | | * // and/or other materials provided with the distribution. |
13 | | * // |
14 | | * // 3. Neither the name of the copyright holder nor the names of its |
15 | | * // contributors may be used to endorse or promote products derived from |
16 | | * // this software without specific prior written permission. |
17 | | * // |
18 | | * // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" |
19 | | * // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE |
20 | | * // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE |
21 | | * // DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE |
22 | | * // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL |
23 | | * // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR |
24 | | * // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER |
25 | | * // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, |
26 | | * // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
27 | | * // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 | | */ |
29 | | use crate::bessel::i0_exp; |
30 | | use crate::common::f_fmla; |
31 | | use crate::double_double::DoubleDouble; |
32 | | use crate::f_pow; |
33 | | use crate::gamma::lnbeta::lnbeta_core; |
34 | | use crate::logs::{fast_log_dd, log1p_fast_dd}; |
35 | | |
/// Regularized incomplete beta function `I_x(a, b)`.
///
/// Evaluates `I_x(a, b) = B(x; a, b) / B(a, b)` via the continued-fraction
/// expansion (Lentz's algorithm), with the leading factor computed in
/// log space using double-double arithmetic to limit rounding error.
///
/// Domain: `a > 0`, `b > 0`, `0 <= x <= 1`.
/// Special cases (handled explicitly below): `x == 0` -> 0, `x == 1` -> 1,
/// `a == 0` -> 1, `b == 0` -> 0, `a == +inf` -> 0, `b == +inf` -> 1;
/// negative or NaN inputs, and `x > 1`, return NaN.
pub fn f_betainc_reg(a: f64, b: f64, x: f64) -> f64 {
    let aa = a.to_bits();
    let ab = b.to_bits();
    let ax = x.to_bits();

    // Single fast filter for every special case. For any negative value the
    // sign bit makes the raw bits exceed 0x7ff << 52, so negatives, NaNs and
    // infinities all land in this branch along with zeros and x >= 1.
    if aa >= 0x7ffu64 << 52
        || aa == 0
        || ab >= 0x7ffu64 << 52
        || ab == 0
        || ax == 0
        || ax >= 0x3ff0000000000000
    {
        if (aa >> 63) != 0 || (ab >> 63) != 0 || (ax >> 63) != 0 {
            // Sign bit set on any argument: a < 0, b < 0 or x < 0 (including
            // -0 for a/b? no — exact zeros were excluded above for a and b;
            // x == -0 is caught later by wrapping_shl). Negative inputs are
            // outside the domain.
            return f64::NAN;
        }
        if ax >= 0x3ff0000000000000 {
            // x >= 1 (x is known non-negative at this point).
            if ax == 0x3ff0000000000000 {
                // x == 1: I_1(a, b) = 1 exactly.
                return 1.;
            }
            // x > 1 (or NaN): outside the domain.
            return f64::NAN;
        }
        if ax.wrapping_shl(1) == 0 {
            // |x| == 0: I_0(a, b) = 0 exactly.
            return 0.;
        }
        if aa.wrapping_shl(1) == 0 {
            // |a| == 0: limiting value is 1 for any x in (0, 1).
            return 1.0;
        }
        if ab.wrapping_shl(1) == 0 {
            // |b| == 0: limiting value is 0 for any x in (0, 1).
            return 0.;
        }
        if a.is_infinite() {
            // a == +inf: all mass is pushed toward x = 1, so I_x = 0.
            return 0.;
        }
        if b.is_infinite() {
            // b == +inf: all mass is pushed toward x = 0, so I_x = 1.
            return 1.;
        }
        // Only remaining possibility: a NaN input; propagate it.
        return a + f64::NAN; // nan
    }

    if ab == 0x3ff0000000000000 {
        // b == 1: closed form I_x(a, 1) = x^a.
        return f_pow(x, a);
    }

    /*The continued fraction converges nicely for x < (a+1)/(a+b+2)*/
    /*Use the fact that beta is symmetrical.*/
    // I_x(a, b) = 1 - I_{1-x}(b, a); when x is past the convergence threshold
    // evaluate the mirrored fraction and undo the reflection at the end.
    let mut return_inverse = false;
    let mut dx = DoubleDouble::new(0., x);
    let mut a = a;
    let mut b = b;
    if x > (a + 1.0) / (a + b + 2.0) {
        std::mem::swap(&mut a, &mut b);
        // 1 - x captured exactly as a double-double pair.
        dx = DoubleDouble::from_full_exact_sub(1.0, x);
        return_inverse = true;
    }
    /*Find the first part before the continued fraction.*/
    // front = x^a * (1-x)^b / (a * B(a, b)), assembled in log space as
    // exp(a*ln(x) + b*ln(1-x) - ln B(a, b)) / a to avoid premature
    // overflow/underflow of the individual factors.
    let da = a;
    let db = b;
    let ln_beta_ab = lnbeta_core(a, b);
    let log_dx = fast_log_dd(dx);
    let mut log1p_dx = log1p_fast_dd(-dx.hi);
    // First-order correction for the low word of dx: d/dt log(t) = 1/t, so
    // log(dx.hi + dx.lo) ~= log(dx.hi) + dx.lo/dx.hi. NOTE(review): the sign
    // here follows the log1p(-dx.hi) call above — confirm against
    // log1p_fast_dd's argument convention.
    log1p_dx.lo += -dx.lo / dx.hi;
    let z1 = DoubleDouble::mul_f64_add(log1p_dx, db, -ln_beta_ab);
    let w0 = DoubleDouble::mul_f64_add(log_dx, da, z1);
    let front = DoubleDouble::div_dd_f64(i0_exp(w0.to_f64()), da);

    /*Use Lentz's algorithm to evaluate the continued fraction.*/
    // f accumulates the running product of c*d factors; with the leading
    // 1/1 term folded in, the fraction's value is (f - 1) at convergence.
    let mut f = 1.0;
    let mut c = 1.0;
    let mut d = 0.0;

    // Floor applied to c and d to avoid division by (near-)zero inside
    // Lentz iterations (standard modified-Lentz safeguard).
    const TINY: f64 = 1.0e-31;
    const STOP: f64 = f64::EPSILON;

    // Hard iteration cap; in practice STOP is reached far sooner.
    for i in 0..200 {
        let m = i / 2;
        // Continued-fraction numerators d_1 = 1,
        // d_{2m+1} = -((a+m)(a+b+m) x) / ((a+2m)(a+2m+1)),
        // d_{2m}   =  (m (b-m) x) / ((a+2m-1)(a+2m)).
        let numerator: f64 = if i == 0 {
            1.0 /*First numerator is 1.0.*/
        } else if i % 2 == 0 {
            let m = m as f64;
            let c0 = f_fmla(2.0, m, da);
            (m * (db - m) * dx.hi) / ((c0 - 1.0) * c0) /*Even term.*/
        } else {
            let m = m as f64;
            let c0 = f_fmla(2.0, m, da);
            -((da + m) * (da + db + m) * dx.hi) / (c0 * (c0 + 1.)) /*Odd term.*/
        };

        /*Do an iteration of Lentz's algorithm.*/
        d = f_fmla(numerator, d, 1.0);
        if d.abs() < TINY {
            d = TINY;
        }
        d = 1.0 / d;

        c = 1.0 + numerator / c;
        if c.abs() < TINY {
            c = TINY;
        }

        let cd = c * d;
        f *= cd;

        /*Check for stop.*/
        // Converged once the multiplicative update is within one ulp of 1.
        if (1.0 - cd).abs() < STOP {
            // r = f - 1, the continued-fraction value, kept as double-double.
            let r = DoubleDouble::from_full_exact_sub(f, 1.0);
            return if return_inverse {
                // Undo the reflection: 1 - front * r, fused for accuracy.
                DoubleDouble::mul_add_f64(-front, r, 1.).to_f64()
            } else {
                DoubleDouble::quick_mult(front, r).to_f64()
            };
        }
    }

    // Iteration cap reached without satisfying STOP: return the best
    // available estimate rather than failing.
    let r = DoubleDouble::from_full_exact_sub(f, 1.0);
    if return_inverse {
        DoubleDouble::mul_add_f64(-front, r, 1.).to_f64()
    } else {
        DoubleDouble::quick_mult(front, r).to_f64()
    }
}
166 | | |
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_betainc() {
        // In-domain and boundary inputs with known reference values.
        let finite_cases: [((f64, f64, f64), f64); 8] = [
            ((0.5, 2.5, 0.5), 0.9244131815783875),
            ((2.5, 1.0, 0.5), 0.1767766952966368811),
            ((0.5, 0., 1.), 1.),
            ((5., 1.4324, 0.1312), 8.872581630413704e-5),
            ((7., 42., 0.4324), 0.9999954480481231),
            ((5., 2., 1.), 1.),
            ((5., 2., 0.), 0.),
            ((5., 2., 0.5), 0.10937500000000006),
        ];
        for &((a, b, x), expected) in finite_cases.iter() {
            assert_eq!(f_betainc_reg(a, b, x), expected);
        }

        // Invalid inputs must all propagate to NaN.
        let nan_cases: [(f64, f64, f64); 8] = [
            (5., 2., -1.),
            (5., 2., 1.1),
            (5., 2., f64::INFINITY),
            (5., 2., f64::NEG_INFINITY),
            (5., 2., f64::NAN),
            (-5., 2., 0.432),
            (5., -2., 0.432),
            (5., 2., -0.432),
        ];
        for &(a, b, x) in nan_cases.iter() {
            assert!(f_betainc_reg(a, b, x).is_nan());
        }
    }
}
190 | | } |