/work/install-coverage/include/opencv4/opencv2/calib3d.hpp
Line | Count | Source |
1 | | /*M/////////////////////////////////////////////////////////////////////////////////////// |
2 | | // |
3 | | // IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING. |
4 | | // |
5 | | // By downloading, copying, installing or using the software you agree to this license. |
6 | | // If you do not agree to this license, do not download, install, |
7 | | // copy or use the software. |
8 | | // |
9 | | // |
10 | | // License Agreement |
11 | | // For Open Source Computer Vision Library |
12 | | // |
13 | | // Copyright (C) 2000-2008, Intel Corporation, all rights reserved. |
14 | | // Copyright (C) 2009, Willow Garage Inc., all rights reserved. |
15 | | // Copyright (C) 2013, OpenCV Foundation, all rights reserved. |
16 | | // Third party copyrights are property of their respective owners. |
17 | | // |
18 | | // Redistribution and use in source and binary forms, with or without modification, |
19 | | // are permitted provided that the following conditions are met: |
20 | | // |
21 | | // * Redistribution's of source code must retain the above copyright notice, |
22 | | // this list of conditions and the following disclaimer. |
23 | | // |
24 | | // * Redistribution's in binary form must reproduce the above copyright notice, |
25 | | // this list of conditions and the following disclaimer in the documentation |
26 | | // and/or other materials provided with the distribution. |
27 | | // |
28 | | // * The name of the copyright holders may not be used to endorse or promote products |
29 | | // derived from this software without specific prior written permission. |
30 | | // |
31 | | // This software is provided by the copyright holders and contributors "as is" and |
32 | | // any express or implied warranties, including, but not limited to, the implied |
33 | | // warranties of merchantability and fitness for a particular purpose are disclaimed. |
34 | | // In no event shall the Intel Corporation or contributors be liable for any direct, |
35 | | // indirect, incidental, special, exemplary, or consequential damages |
36 | | // (including, but not limited to, procurement of substitute goods or services; |
37 | | // loss of use, data, or profits; or business interruption) however caused |
38 | | // and on any theory of liability, whether in contract, strict liability, |
39 | | // or tort (including negligence or otherwise) arising in any way out of |
40 | | // the use of this software, even if advised of the possibility of such damage. |
41 | | // |
42 | | //M*/ |
43 | | |
44 | | #ifndef OPENCV_CALIB3D_HPP |
45 | | #define OPENCV_CALIB3D_HPP |
46 | | |
47 | | #include "opencv2/core.hpp" |
48 | | #include "opencv2/core/types.hpp" |
49 | | #include "opencv2/features2d.hpp" |
50 | | #include "opencv2/core/affine.hpp" |
51 | | |
52 | | /** |
53 | | @defgroup calib3d Camera Calibration and 3D Reconstruction |
54 | | |
55 | | The functions in this section use a so-called pinhole camera model. The view of a scene |
56 | | is obtained by projecting a scene's 3D point \f$P_w\f$ into the image plane using a perspective |
57 | | transformation which forms the corresponding pixel \f$p\f$. Both \f$P_w\f$ and \f$p\f$ are |
58 | | represented in homogeneous coordinates, i.e. as 3D and 2D homogeneous vector respectively. You will |
59 | | find a brief introduction to projective geometry, homogeneous vectors and homogeneous |
60 | | transformations at the end of this section's introduction. For more succinct notation, we often drop |
61 | | the 'homogeneous' and say vector instead of homogeneous vector. |
62 | | |
63 | | The distortion-free projective transformation given by a pinhole camera model is shown below. |
64 | | |
65 | | \f[s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w,\f] |
66 | | |
67 | | where \f$P_w\f$ is a 3D point expressed with respect to the world coordinate system, |
68 | | \f$p\f$ is a 2D pixel in the image plane, \f$A\f$ is the camera intrinsic matrix, |
69 | | \f$R\f$ and \f$t\f$ are the rotation and translation that describe the change of coordinates from |
70 | | world to camera coordinate systems (or camera frame) and \f$s\f$ is the projective transformation's |
71 | | arbitrary scaling and not part of the camera model. |
72 | | |
73 | | The camera intrinsic matrix \f$A\f$ (notation used as in @cite Zhang2000 and also generally notated |
74 | | as \f$K\f$) projects 3D points given in the camera coordinate system to 2D pixel coordinates, i.e. |
75 | | |
76 | | \f[p = A P_c.\f] |
77 | | |
78 | | The camera intrinsic matrix \f$A\f$ is composed of the focal lengths \f$f_x\f$ and \f$f_y\f$, which are |
79 | | expressed in pixel units, and the principal point \f$(c_x, c_y)\f$, that is usually close to the |
80 | | image center: |
81 | | |
82 | | \f[A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1},\f] |
83 | | |
84 | | and thus |
85 | | |
86 | | \f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1} \vecthree{X_c}{Y_c}{Z_c}.\f] |
87 | | |
88 | | The matrix of intrinsic parameters does not depend on the scene viewed. So, once estimated, it can |
89 | | be re-used as long as the focal length is fixed (in the case of a zoom lens). Thus, if an image from the |
90 | | camera is scaled by some factor, all of these parameters need to be scaled by the same factor |
91 | | (multiplied for upscaling, divided for downscaling). |
92 | | |
93 | | The joint rotation-translation matrix \f$[R|t]\f$ is the matrix product of a projective |
94 | | transformation and a homogeneous transformation. The 3-by-4 projective transformation maps 3D points |
95 | | represented in camera coordinates to 2D points in the image plane and represented in normalized |
96 | | camera coordinates \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$: |
97 | | |
98 | | \f[Z_c \begin{bmatrix} |
99 | | x' \\ |
100 | | y' \\ |
101 | | 1 |
102 | | \end{bmatrix} = \begin{bmatrix} |
103 | | 1 & 0 & 0 & 0 \\ |
104 | | 0 & 1 & 0 & 0 \\ |
105 | | 0 & 0 & 1 & 0 |
106 | | \end{bmatrix} |
107 | | \begin{bmatrix} |
108 | | X_c \\ |
109 | | Y_c \\ |
110 | | Z_c \\ |
111 | | 1 |
112 | | \end{bmatrix}.\f] |
113 | | |
114 | | The homogeneous transformation is encoded by the extrinsic parameters \f$R\f$ and \f$t\f$ and |
115 | | represents the change of basis from world coordinate system \f$w\f$ to the camera coordinate system |
116 | | \f$c\f$. Thus, given the representation of the point \f$P\f$ in world coordinates, \f$P_w\f$, we |
117 | | obtain \f$P\f$'s representation in the camera coordinate system, \f$P_c\f$, by |
118 | | |
119 | | \f[P_c = \begin{bmatrix} |
120 | | R & t \\ |
121 | | 0 & 1 |
122 | | \end{bmatrix} P_w.\f] |
123 | | |
124 | | This homogeneous transformation is composed of \f$R\f$, a 3-by-3 rotation matrix, and \f$t\f$, a |
125 | | 3-by-1 translation vector: |
126 | | |
127 | | \f[\begin{bmatrix} |
128 | | R & t \\ |
129 | | 0 & 1 |
130 | | \end{bmatrix} = \begin{bmatrix} |
131 | | r_{11} & r_{12} & r_{13} & t_x \\ |
132 | | r_{21} & r_{22} & r_{23} & t_y \\ |
133 | | r_{31} & r_{32} & r_{33} & t_z \\ |
134 | | 0 & 0 & 0 & 1 |
135 | | \end{bmatrix}, |
136 | | \f] |
137 | | |
138 | | and therefore |
139 | | |
140 | | \f[\begin{bmatrix} |
141 | | X_c \\ |
142 | | Y_c \\ |
143 | | Z_c \\ |
144 | | 1 |
145 | | \end{bmatrix} = \begin{bmatrix} |
146 | | r_{11} & r_{12} & r_{13} & t_x \\ |
147 | | r_{21} & r_{22} & r_{23} & t_y \\ |
148 | | r_{31} & r_{32} & r_{33} & t_z \\ |
149 | | 0 & 0 & 0 & 1 |
150 | | \end{bmatrix} |
151 | | \begin{bmatrix} |
152 | | X_w \\ |
153 | | Y_w \\ |
154 | | Z_w \\ |
155 | | 1 |
156 | | \end{bmatrix}.\f] |
157 | | |
158 | | Combining the projective transformation and the homogeneous transformation, we obtain the projective |
159 | | transformation that maps 3D points in world coordinates into 2D points in the image plane and in |
160 | | normalized camera coordinates: |
161 | | |
162 | | \f[Z_c \begin{bmatrix} |
163 | | x' \\ |
164 | | y' \\ |
165 | | 1 |
166 | | \end{bmatrix} = \begin{bmatrix} R|t \end{bmatrix} \begin{bmatrix} |
167 | | X_w \\ |
168 | | Y_w \\ |
169 | | Z_w \\ |
170 | | 1 |
171 | | \end{bmatrix} = \begin{bmatrix} |
172 | | r_{11} & r_{12} & r_{13} & t_x \\ |
173 | | r_{21} & r_{22} & r_{23} & t_y \\ |
174 | | r_{31} & r_{32} & r_{33} & t_z |
175 | | \end{bmatrix} |
176 | | \begin{bmatrix} |
177 | | X_w \\ |
178 | | Y_w \\ |
179 | | Z_w \\ |
180 | | 1 |
181 | | \end{bmatrix},\f] |
182 | | |
183 | | with \f$x' = X_c / Z_c\f$ and \f$y' = Y_c / Z_c\f$. Putting the equations for intrinsics and extrinsics together, we can write out |
184 | | \f$s \; p = A \begin{bmatrix} R|t \end{bmatrix} P_w\f$ as |
185 | | |
186 | | \f[s \vecthree{u}{v}{1} = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1} |
187 | | \begin{bmatrix} |
188 | | r_{11} & r_{12} & r_{13} & t_x \\ |
189 | | r_{21} & r_{22} & r_{23} & t_y \\ |
190 | | r_{31} & r_{32} & r_{33} & t_z |
191 | | \end{bmatrix} |
192 | | \begin{bmatrix} |
193 | | X_w \\ |
194 | | Y_w \\ |
195 | | Z_w \\ |
196 | | 1 |
197 | | \end{bmatrix}.\f] |
198 | | |
199 | | If \f$Z_c \ne 0\f$, the transformation above is equivalent to the following, |
200 | | |
201 | | \f[\begin{bmatrix} |
202 | | u \\ |
203 | | v |
204 | | \end{bmatrix} = \begin{bmatrix} |
205 | | f_x X_c/Z_c + c_x \\ |
206 | | f_y Y_c/Z_c + c_y |
207 | | \end{bmatrix}\f] |
208 | | |
209 | | with |
210 | | |
211 | | \f[\vecthree{X_c}{Y_c}{Z_c} = \begin{bmatrix} |
212 | | R|t |
213 | | \end{bmatrix} \begin{bmatrix} |
214 | | X_w \\ |
215 | | Y_w \\ |
216 | | Z_w \\ |
217 | | 1 |
218 | | \end{bmatrix}.\f] |
219 | | |
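 | | As a quick numeric check of the model above, a minimal sketch with hypothetical intrinsics and \f$R = I\f$, \f$t = 0\f$ (so \f$P_c = P_w\f$): |
 | | @code |
 | |     double fx = 800, fy = 800, cx = 320, cy = 240;   // hypothetical intrinsics |
 | |     double Xc = 0.1, Yc = -0.2, Zc = 2.0;            // point in camera coordinates |
 | |     double u = fx * Xc / Zc + cx;                    // u = 360 |
 | |     double v = fy * Yc / Zc + cy;                    // v = 160 |
 | | @endcode |
 | |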
220 | | The following figure illustrates the pinhole camera model. |
221 | | |
222 | |  |
223 | | |
224 | | Real lenses usually have some distortion, mostly radial distortion, and slight tangential distortion. |
225 | | So, the above model is extended as: |
226 | | |
227 | | \f[\begin{bmatrix} |
228 | | u \\ |
229 | | v |
230 | | \end{bmatrix} = \begin{bmatrix} |
231 | | f_x x'' + c_x \\ |
232 | | f_y y'' + c_y |
233 | | \end{bmatrix}\f] |
234 | | |
235 | | where |
236 | | |
237 | | \f[\begin{bmatrix} |
238 | | x'' \\ |
239 | | y'' |
240 | | \end{bmatrix} = \begin{bmatrix} |
241 | | x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + 2 p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4 \\ |
242 | | y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ |
243 | | \end{bmatrix}\f] |
244 | | |
245 | | with |
246 | | |
247 | | \f[r^2 = x'^2 + y'^2\f] |
248 | | |
249 | | and |
250 | | |
251 | | \f[\begin{bmatrix} |
252 | | x'\\ |
253 | | y' |
254 | | \end{bmatrix} = \begin{bmatrix} |
255 | | X_c/Z_c \\ |
256 | | Y_c/Z_c |
257 | | \end{bmatrix},\f] |
258 | | |
259 | | if \f$Z_c \ne 0\f$. |
260 | | |
261 | | The distortion parameters are the radial coefficients \f$k_1\f$, \f$k_2\f$, \f$k_3\f$, \f$k_4\f$, \f$k_5\f$, and \f$k_6\f$; |
262 | | \f$p_1\f$ and \f$p_2\f$ are the tangential distortion coefficients; and \f$s_1\f$, \f$s_2\f$, \f$s_3\f$, and \f$s_4\f$ |
263 | | are the thin prism distortion coefficients. Higher-order coefficients are not considered in OpenCV. |
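 | |
 | | As a sketch of how these coefficients enter the model, the distorted coordinates \f$x''\f$ and \f$y''\f$ can be computed by hand (hypothetical coefficients; \f$k_4\f$...\f$k_6\f$ and \f$s_1\f$...\f$s_4\f$ set to zero): |
 | | @code |
 | |     double k1 = -0.28, k2 = 0.09, k3 = 0.0, p1 = 1e-3, p2 = -2e-4; |
 | |     double xp = 0.3, yp = -0.1;                    // x', y': normalized coordinates |
 | |     double r2 = xp*xp + yp*yp; |
 | |     double radial = 1 + k1*r2 + k2*r2*r2 + k3*r2*r2*r2; |
 | |     double xpp = xp*radial + 2*p1*xp*yp + p2*(r2 + 2*xp*xp);   // x'' |
 | |     double ypp = yp*radial + p1*(r2 + 2*yp*yp) + 2*p2*xp*yp;   // y'' |
 | |     // then u = f_x*x'' + c_x and v = f_y*y'' + c_y, as above |
 | | @endcode |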
264 | | |
265 | | The next figures show two common types of radial distortion: barrel distortion |
266 | | (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically decreasing) |
267 | | and pincushion distortion (\f$ 1 + k_1 r^2 + k_2 r^4 + k_3 r^6 \f$ monotonically increasing). |
268 | | Radial distortion is always monotonic for real lenses, |
269 | | and if the estimator produces a non-monotonic result, |
270 | | this should be considered a calibration failure. |
271 | | More generally, radial distortion must be monotonic and the distortion function must be bijective. |
272 | | A failed estimation result may look deceptively good near the image center |
273 | | but will work poorly in e.g. AR/SFM applications. |
274 | | The optimization method used in OpenCV camera calibration does not include these constraints as |
275 | | the framework does not support the required integer programming and polynomial inequalities. |
276 | | See [issue #15992](https://github.com/opencv/opencv/issues/15992) for additional information. |
277 | | |
278 | |  |
279 | |  |
280 | | |
281 | | In some cases, the image sensor may be tilted in order to focus an oblique plane in front of the |
282 | | camera (Scheimpflug principle). This can be useful for particle image velocimetry (PIV) or |
283 | | triangulation with a laser fan. The tilt causes a perspective distortion of \f$x''\f$ and |
284 | | \f$y''\f$. This distortion can be modeled in the following way, see e.g. @cite Louhichi07. |
285 | | |
286 | | \f[\begin{bmatrix} |
287 | | u \\ |
288 | | v |
289 | | \end{bmatrix} = \begin{bmatrix} |
290 | | f_x x''' + c_x \\ |
291 | | f_y y''' + c_y |
292 | | \end{bmatrix},\f] |
293 | | |
294 | | where |
295 | | |
296 | | \f[s\vecthree{x'''}{y'''}{1} = |
297 | | \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)} |
298 | | {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} |
299 | | {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\f] |
300 | | |
301 | | and the matrix \f$R(\tau_x, \tau_y)\f$ is defined by two rotations with angular parameters |
302 | | \f$\tau_x\f$ and \f$\tau_y\f$, respectively, |
303 | | |
304 | | \f[ |
305 | | R(\tau_x, \tau_y) = |
306 | | \vecthreethree{\cos(\tau_y)}{0}{-\sin(\tau_y)}{0}{1}{0}{\sin(\tau_y)}{0}{\cos(\tau_y)} |
307 | | \vecthreethree{1}{0}{0}{0}{\cos(\tau_x)}{\sin(\tau_x)}{0}{-\sin(\tau_x)}{\cos(\tau_x)} = |
308 | | \vecthreethree{\cos(\tau_y)}{\sin(\tau_y)\sin(\tau_x)}{-\sin(\tau_y)\cos(\tau_x)} |
309 | | {0}{\cos(\tau_x)}{\sin(\tau_x)} |
310 | | {\sin(\tau_y)}{-\cos(\tau_y)\sin(\tau_x)}{\cos(\tau_y)\cos(\tau_x)}. |
311 | | \f] |
312 | | |
313 | | In the functions below the coefficients are passed or returned as |
314 | | |
315 | | \f[(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f] |
316 | | |
317 | | vector. That is, if the vector contains four elements, it means that \f$k_3=0\f$. The distortion |
318 | | coefficients do not depend on the scene viewed. Thus, they also belong to the intrinsic camera |
319 | | parameters. And they remain the same regardless of the captured image resolution. If, for example, a |
320 | | camera has been calibrated on images of 320 x 240 resolution, exactly the same distortion |
321 | | coefficients can be used for 640 x 480 images from the same camera while \f$f_x\f$, \f$f_y\f$, |
322 | | \f$c_x\f$, and \f$c_y\f$ need to be scaled appropriately. |
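 | |
 | | For example, reusing a 320 x 240 calibration at 640 x 480 could look like this (a sketch; A320 is the hypothetical camera matrix calibrated at 320 x 240): |
 | | @code |
 | |     cv::Matx33d A640 = A320;          // copy the 320 x 240 intrinsics |
 | |     A640(0,0) *= 2; A640(1,1) *= 2;   // f_x, f_y |
 | |     A640(0,2) *= 2; A640(1,2) *= 2;   // c_x, c_y |
 | |     // the distortion coefficient vector is reused unchanged |
 | | @endcode |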
323 | | |
324 | | The functions below use the above model to do the following: |
325 | | |
326 | | - Project 3D points to the image plane given intrinsic and extrinsic parameters. |
327 | | - Compute extrinsic parameters given intrinsic parameters, a few 3D points, and their |
328 | | projections. |
329 | | - Estimate intrinsic and extrinsic camera parameters from several views of a known calibration |
330 | | pattern (every view is described by several 3D-2D point correspondences). |
331 | | - Estimate the relative position and orientation of the stereo camera "heads" and compute the |
332 | | *rectification* transformation that makes the camera optical axes parallel. |
333 | | |
334 | | <B> Homogeneous Coordinates </B><br> |
335 | | Homogeneous coordinates are a system of coordinates used in projective geometry. They make it |
336 | | possible to represent points at infinity with finite coordinates and simplify formulas compared |
337 | | to their cartesian counterparts, e.g. they have the advantage that affine transformations can be |
338 | | expressed as linear homogeneous transformations. |
339 | | |
340 | | One obtains the homogeneous vector \f$P_h\f$ by appending a 1 to an n-dimensional cartesian |
341 | | vector \f$P\f$, e.g. for a 3D cartesian vector the mapping \f$P \rightarrow P_h\f$ is: |
342 | | |
343 | | \f[\begin{bmatrix} |
344 | | X \\ |
345 | | Y \\ |
346 | | Z |
347 | | \end{bmatrix} \rightarrow \begin{bmatrix} |
348 | | X \\ |
349 | | Y \\ |
350 | | Z \\ |
351 | | 1 |
352 | | \end{bmatrix}.\f] |
353 | | |
354 | | For the inverse mapping \f$P_h \rightarrow P\f$, one divides all elements of the homogeneous vector |
355 | | by its last element, e.g. for a 3D homogeneous vector one gets its 2D cartesian counterpart by: |
356 | | |
357 | | \f[\begin{bmatrix} |
358 | | X \\ |
359 | | Y \\ |
360 | | W |
361 | | \end{bmatrix} \rightarrow \begin{bmatrix} |
362 | | X / W \\ |
363 | | Y / W |
364 | | \end{bmatrix},\f] |
365 | | |
366 | | if \f$W \ne 0\f$. |
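 | |
 | | Both mappings are available in OpenCV; a minimal sketch using #convertPointsToHomogeneous and #convertPointsFromHomogeneous: |
 | | @code |
 | |     std::vector<cv::Point3d> cart = { {1.0, 2.0, 3.0} }; |
 | |     std::vector<cv::Vec4d> hom; |
 | |     cv::convertPointsToHomogeneous(cart, hom);         // (1, 2, 3) -> (1, 2, 3, 1) |
 | |     std::vector<cv::Point3d> cartBack; |
 | |     cv::convertPointsFromHomogeneous(hom, cartBack);   // divides by the last element |
 | | @endcode |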
367 | | |
368 | | Due to this mapping, all multiples \f$k P_h\f$, for \f$k \ne 0\f$, of a homogeneous point represent |
369 | | the same point \f$P_h\f$. An intuitive understanding of this property is that under a projective |
370 | | transformation, all multiples of \f$P_h\f$ are mapped to the same point. This is the physical |
371 | | observation one does for pinhole cameras, as all points along a ray through the camera's pinhole are |
372 | | projected to the same image point, e.g. all points along the red ray in the image of the pinhole |
373 | | camera model above would be mapped to the same image coordinate. This property is also the source |
374 | | for the scale ambiguity s in the equation of the pinhole camera model. |
375 | | |
376 | | As mentioned, by using homogeneous coordinates we can express any change of basis parameterized by |
377 | | \f$R\f$ and \f$t\f$ as a linear transformation, e.g. the change of basis from coordinate system |
378 | | 0 to coordinate system 1 becomes: |
379 | | |
380 | | \f[P_1 = R P_0 + t \rightarrow P_{h_1} = \begin{bmatrix} |
381 | | R & t \\ |
382 | | 0 & 1 |
383 | | \end{bmatrix} P_{h_0}.\f] |
384 | | |
385 | | @note |
386 | | - Many functions in this module take a camera intrinsic matrix as an input parameter. Although all |
387 | | functions assume the same structure of this parameter, they may name it differently. The |
388 | | parameter's description, however, will be clear in that a camera intrinsic matrix with the structure |
389 | | shown above is required. |
390 | | - A calibration sample for 3 cameras in a horizontal position can be found at |
391 | | opencv_source_code/samples/cpp/3calibration.cpp |
392 | | - A calibration sample based on a sequence of images can be found at |
393 | | opencv_source_code/samples/cpp/calibration.cpp |
394 | | - A calibration sample in order to do 3D reconstruction can be found at |
395 | | opencv_source_code/samples/cpp/build3dmodel.cpp |
396 | | - A calibration example on stereo calibration can be found at |
397 | | opencv_source_code/samples/cpp/stereo_calib.cpp |
398 | | - A calibration example on stereo matching can be found at |
399 | | opencv_source_code/samples/cpp/stereo_match.cpp |
400 | | - (Python) A camera calibration sample can be found at |
401 | | opencv_source_code/samples/python/calibrate.py |
402 | | |
403 | | @{ |
404 | | @defgroup calib3d_fisheye Fisheye camera model |
405 | | |
406 | | Definitions: Let P be a point in 3D of coordinates X in the world reference frame (stored in the |
407 | | matrix X). The coordinate vector of P in the camera reference frame is: |
408 | | |
409 | | \f[Xc = R X + T\f] |
410 | | |
411 | | where R is the rotation matrix corresponding to the rotation vector om: R = rodrigues(om); call x, y |
412 | | and z the 3 coordinates of Xc: |
413 | | |
414 | | \f[x = Xc_1 \\ y = Xc_2 \\ z = Xc_3\f] |
415 | | |
416 | | The pinhole projection coordinates of P are [a; b], where |
417 | | |
418 | | \f[a = x / z \ and \ b = y / z \\ r^2 = a^2 + b^2 \\ \theta = atan(r)\f] |
419 | | |
420 | | Fisheye distortion: |
421 | | |
422 | | \f[\theta_d = \theta (1 + k_1 \theta^2 + k_2 \theta^4 + k_3 \theta^6 + k_4 \theta^8)\f] |
423 | | |
424 | | The distorted point coordinates are [x'; y'] where |
425 | | |
426 | | \f[x' = (\theta_d / r) a \\ y' = (\theta_d / r) b \f] |
427 | | |
428 | | Finally, the conversion into pixel coordinates: the final pixel coordinate vector [u; v] is given by: |
429 | | |
430 | | \f[u = f_x (x' + \alpha y') + c_x \\ |
431 | | v = f_y y' + c_y\f] |
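 | |
 | | A minimal sketch of the whole fisheye projection chain (all values hypothetical): |
 | | @code |
 | |     cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);   // intrinsics |
 | |     cv::Vec4d D(0.05, -0.01, 0.002, -0.0005);           // k1, k2, k3, k4 |
 | |     std::vector<cv::Point3d> obj = { {0.2, 0.1, 1.0} }; |
 | |     std::vector<cv::Point2d> img; |
 | |     cv::fisheye::projectPoints(obj, img, cv::Vec3d::all(0), cv::Vec3d::all(0), K, D); |
 | | @endcode |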
432 | | |
433 | | Summary: |
434 | | Generic camera model @cite Kannala2006 with perspective projection and without distortion correction |
435 | | |
436 | | @defgroup calib3d_c C API |
437 | | |
438 | | @} |
439 | | */ |
440 | | |
441 | | namespace cv |
442 | | { |
443 | | |
444 | | //! @addtogroup calib3d |
445 | | //! @{ |
446 | | |
447 | | //! type of the robust estimation algorithm |
448 | | enum { LMEDS = 4, //!< least-median of squares algorithm |
449 | | RANSAC = 8, //!< RANSAC algorithm |
450 | | RHO = 16, //!< RHO algorithm |
451 | | USAC_DEFAULT = 32, //!< USAC algorithm, default settings |
452 | | USAC_PARALLEL = 33, //!< USAC, parallel version |
453 | | USAC_FM_8PTS = 34, //!< USAC, fundamental matrix 8 points |
454 | | USAC_FAST = 35, //!< USAC, fast settings |
455 | | USAC_ACCURATE = 36, //!< USAC, accurate settings |
456 | | USAC_PROSAC = 37, //!< USAC, sorted points, runs PROSAC |
457 | | USAC_MAGSAC = 38 //!< USAC, runs MAGSAC++ |
458 | | }; |
459 | | |
460 | | enum SolvePnPMethod { |
461 | | SOLVEPNP_ITERATIVE = 0, //!< Pose refinement using non-linear Levenberg-Marquardt minimization scheme @cite Madsen04 @cite Eade13 \n |
462 | | //!< Initial solution for non-planar "objectPoints" needs at least 6 points and uses the DLT algorithm. \n |
463 | | //!< Initial solution for planar "objectPoints" needs at least 4 points and uses pose from homography decomposition. |
464 | | SOLVEPNP_EPNP = 1, //!< EPnP: Efficient Perspective-n-Point Camera Pose Estimation @cite lepetit2009epnp |
465 | | SOLVEPNP_P3P = 2, //!< Complete Solution Classification for the Perspective-Three-Point Problem @cite gao2003complete |
466 | | SOLVEPNP_DLS = 3, //!< **Broken implementation. Using this flag will fallback to EPnP.** \n |
467 | | //!< A Direct Least-Squares (DLS) Method for PnP @cite hesch2011direct |
468 | | SOLVEPNP_UPNP = 4, //!< **Broken implementation. Using this flag will fallback to EPnP.** \n |
469 | | //!< Exhaustive Linearization for Robust Camera Pose and Focal Length Estimation @cite penate2013exhaustive |
470 | | SOLVEPNP_AP3P = 5, //!< An Efficient Algebraic Solution to the Perspective-Three-Point Problem @cite Ke17 |
471 | | SOLVEPNP_IPPE = 6, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n |
472 | | //!< Object points must be coplanar. |
473 | | SOLVEPNP_IPPE_SQUARE = 7, //!< Infinitesimal Plane-Based Pose Estimation @cite Collins14 \n |
474 | | //!< This is a special case suitable for marker pose estimation.\n |
475 | | //!< 4 coplanar object points must be defined in the following order: |
476 | | //!< - point 0: [-squareLength / 2, squareLength / 2, 0] |
477 | | //!< - point 1: [ squareLength / 2, squareLength / 2, 0] |
478 | | //!< - point 2: [ squareLength / 2, -squareLength / 2, 0] |
479 | | //!< - point 3: [-squareLength / 2, -squareLength / 2, 0] |
480 | | SOLVEPNP_SQPNP = 8, //!< SQPnP: A Consistently Fast and Globally Optimal Solution to the Perspective-n-Point Problem @cite Terzakis2020SQPnP |
481 | | #ifndef CV_DOXYGEN |
482 | | SOLVEPNP_MAX_COUNT //!< Used for count |
483 | | #endif |
484 | | }; |
485 | | |
486 | | enum { CALIB_CB_ADAPTIVE_THRESH = 1, |
487 | | CALIB_CB_NORMALIZE_IMAGE = 2, |
488 | | CALIB_CB_FILTER_QUADS = 4, |
489 | | CALIB_CB_FAST_CHECK = 8, |
490 | | CALIB_CB_EXHAUSTIVE = 16, |
491 | | CALIB_CB_ACCURACY = 32, |
492 | | CALIB_CB_LARGER = 64, |
493 | | CALIB_CB_MARKER = 128 |
494 | | }; |
495 | | |
496 | | enum { CALIB_CB_SYMMETRIC_GRID = 1, |
497 | | CALIB_CB_ASYMMETRIC_GRID = 2, |
498 | | CALIB_CB_CLUSTERING = 4 |
499 | | }; |
500 | | |
501 | | enum { CALIB_NINTRINSIC = 18, |
502 | | CALIB_USE_INTRINSIC_GUESS = 0x00001, |
503 | | CALIB_FIX_ASPECT_RATIO = 0x00002, |
504 | | CALIB_FIX_PRINCIPAL_POINT = 0x00004, |
505 | | CALIB_ZERO_TANGENT_DIST = 0x00008, |
506 | | CALIB_FIX_FOCAL_LENGTH = 0x00010, |
507 | | CALIB_FIX_K1 = 0x00020, |
508 | | CALIB_FIX_K2 = 0x00040, |
509 | | CALIB_FIX_K3 = 0x00080, |
510 | | CALIB_FIX_K4 = 0x00800, |
511 | | CALIB_FIX_K5 = 0x01000, |
512 | | CALIB_FIX_K6 = 0x02000, |
513 | | CALIB_RATIONAL_MODEL = 0x04000, |
514 | | CALIB_THIN_PRISM_MODEL = 0x08000, |
515 | | CALIB_FIX_S1_S2_S3_S4 = 0x10000, |
516 | | CALIB_TILTED_MODEL = 0x40000, |
517 | | CALIB_FIX_TAUX_TAUY = 0x80000, |
518 | | CALIB_USE_QR = 0x100000, //!< use QR instead of SVD decomposition for solving. Faster but potentially less precise |
519 | | CALIB_FIX_TANGENT_DIST = 0x200000, |
520 | | // only for stereo |
521 | | CALIB_FIX_INTRINSIC = 0x00100, |
522 | | CALIB_SAME_FOCAL_LENGTH = 0x00200, |
523 | | // for stereo rectification |
524 | | CALIB_ZERO_DISPARITY = 0x00400, |
525 | | CALIB_USE_LU = (1 << 17), //!< use LU instead of SVD decomposition for solving. much faster but potentially less precise |
526 | | CALIB_USE_EXTRINSIC_GUESS = (1 << 22) //!< for stereoCalibrate |
527 | | }; |
528 | | |
529 | | //! the algorithm for finding fundamental matrix |
530 | | enum { FM_7POINT = 1, //!< 7-point algorithm |
531 | | FM_8POINT = 2, //!< 8-point algorithm |
532 | | FM_LMEDS = 4, //!< least-median algorithm. 7-point algorithm is used. |
533 | | FM_RANSAC = 8 //!< RANSAC algorithm. It needs at least 15 points. 7-point algorithm is used. |
534 | | }; |
535 | | |
536 | | enum HandEyeCalibrationMethod |
537 | | { |
538 | | CALIB_HAND_EYE_TSAI = 0, //!< A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration @cite Tsai89 |
539 | | CALIB_HAND_EYE_PARK = 1, //!< Robot Sensor Calibration: Solving AX = XB on the Euclidean Group @cite Park94 |
540 | | CALIB_HAND_EYE_HORAUD = 2, //!< Hand-eye Calibration @cite Horaud95 |
541 | | CALIB_HAND_EYE_ANDREFF = 3, //!< On-line Hand-Eye Calibration @cite Andreff99 |
542 | | CALIB_HAND_EYE_DANIILIDIS = 4 //!< Hand-Eye Calibration Using Dual Quaternions @cite Daniilidis98 |
543 | | }; |
544 | | |
545 | | enum RobotWorldHandEyeCalibrationMethod |
546 | | { |
547 | | CALIB_ROBOT_WORLD_HAND_EYE_SHAH = 0, //!< Solving the robot-world/hand-eye calibration problem using the kronecker product @cite Shah2013SolvingTR |
548 | | CALIB_ROBOT_WORLD_HAND_EYE_LI = 1 //!< Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product @cite Li2010SimultaneousRA |
549 | | }; |
550 | | |
551 | | enum SamplingMethod { SAMPLING_UNIFORM, SAMPLING_PROGRESSIVE_NAPSAC, SAMPLING_NAPSAC, |
552 | | SAMPLING_PROSAC }; |
553 | | enum LocalOptimMethod {LOCAL_OPTIM_NULL, LOCAL_OPTIM_INNER_LO, LOCAL_OPTIM_INNER_AND_ITER_LO, |
554 | | LOCAL_OPTIM_GC, LOCAL_OPTIM_SIGMA}; |
555 | | enum ScoreMethod {SCORE_METHOD_RANSAC, SCORE_METHOD_MSAC, SCORE_METHOD_MAGSAC, SCORE_METHOD_LMEDS}; |
556 | | enum NeighborSearchMethod { NEIGH_FLANN_KNN, NEIGH_GRID, NEIGH_FLANN_RADIUS }; |
557 | | |
558 | | struct CV_EXPORTS_W_SIMPLE UsacParams |
559 | | { // in alphabetical order |
560 | | CV_WRAP UsacParams(); |
561 | | CV_PROP_RW double confidence; |
562 | | CV_PROP_RW bool isParallel; |
563 | | CV_PROP_RW int loIterations; |
564 | | CV_PROP_RW LocalOptimMethod loMethod; |
565 | | CV_PROP_RW int loSampleSize; |
566 | | CV_PROP_RW int maxIterations; |
567 | | CV_PROP_RW NeighborSearchMethod neighborsSearch; |
568 | | CV_PROP_RW int randomGeneratorState; |
569 | | CV_PROP_RW SamplingMethod sampler; |
570 | | CV_PROP_RW ScoreMethod score; |
571 | | CV_PROP_RW double threshold; |
572 | | }; |
573 | | |
574 | | /** @brief Converts a rotation matrix to a rotation vector or vice versa. |
575 | | |
576 | | @param src Input rotation vector (3x1 or 1x3) or rotation matrix (3x3). |
577 | | @param dst Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively. |
578 | | @param jacobian Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial |
579 | | derivatives of the output array components with respect to the input array components. |
580 | | |
581 | | \f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos(\theta) I + (1- \cos{\theta} ) r r^T + \sin(\theta) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f] |
582 | | |
583 | | The inverse transformation can also be done easily, since |
584 | | |
585 | | \f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f] |
586 | | |
587 | | A rotation vector is a convenient and the most compact representation of a rotation matrix (since any |
588 | | rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry |
589 | | optimization procedures like @ref calibrateCamera, @ref stereoCalibrate, or @ref solvePnP . |
590 | | |
591 | | @note More information about the computation of the derivative of a 3D rotation matrix with respect to its exponential coordinate |
592 | | can be found in: |
593 | | - A Compact Formula for the Derivative of a 3-D Rotation in Exponential Coordinates, Guillermo Gallego, Anthony J. Yezzi @cite Gallego2014ACF |
594 | | |
595 | | @note Useful information on SE(3) and Lie Groups can be found in: |
596 | | - A tutorial on SE(3) transformation parameterizations and on-manifold optimization, Jose-Luis Blanco @cite blanco2010tutorial |
597 | | - Lie Groups for 2D and 3D Transformation, Ethan Eade @cite Eade17 |
598 | | - A micro Lie theory for state estimation in robotics, Joan Solà, Jérémie Deray, Dinesh Atchuthan @cite Sol2018AML |
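 | |
 | | A minimal usage sketch (hypothetical values): |
 | | @code |
 | |     cv::Vec3d rvec(0.1, -0.2, 0.3);   // rotation vector |
 | |     cv::Mat R; |
 | |     cv::Rodrigues(rvec, R);           // 3x1 vector -> 3x3 rotation matrix |
 | |     cv::Mat rvecBack; |
 | |     cv::Rodrigues(R, rvecBack);       // 3x3 matrix -> 3x1 vector, recovers rvec |
 | | @endcode |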
599 | | */ |
600 | | CV_EXPORTS_W void Rodrigues( InputArray src, OutputArray dst, OutputArray jacobian = noArray() ); |
601 | | |
602 | | |
603 | | |
604 | | /** Levenberg-Marquardt solver. Starting with the specified vector of parameters, it |
605 | | optimizes the target vector criterion "err" |
606 | | (finds a local minimum of the sum of squared components of the error vector). |
607 | | |
608 | | When needed, it calls the user-provided callback. |
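 | |
 | | A sketch of a callback for a simple line fit y = a*x + b (all names hypothetical): |
 | | @code |
 | |     class LineFitCB : public cv::LMSolver::Callback |
 | |     { |
 | |     public: |
 | |         LineFitCB(const std::vector<double>& xs, const std::vector<double>& ys) |
 | |             : xs_(xs), ys_(ys) {} |
 | |         bool compute(cv::InputArray param, cv::OutputArray err, cv::OutputArray J) const CV_OVERRIDE |
 | |         { |
 | |             cv::Mat p = param.getMat();                  // 2x1 vector: (a, b) |
 | |             double a = p.at<double>(0), b = p.at<double>(1); |
 | |             int n = (int)xs_.size(); |
 | |             err.create(n, 1, CV_64F); |
 | |             cv::Mat e = err.getMat(); |
 | |             for (int i = 0; i < n; i++) |
 | |                 e.at<double>(i) = a*xs_[i] + b - ys_[i]; // residuals |
 | |             if (J.needed()) |
 | |             { |
 | |                 J.create(n, 2, CV_64F); |
 | |                 cv::Mat j = J.getMat(); |
 | |                 for (int i = 0; i < n; i++) |
 | |                 { |
 | |                     j.at<double>(i, 0) = xs_[i];         // d(err_i)/d(a) |
 | |                     j.at<double>(i, 1) = 1.0;            // d(err_i)/d(b) |
 | |                 } |
 | |             } |
 | |             return true; |
 | |         } |
 | |         std::vector<double> xs_, ys_; |
 | |     }; |
 | |     // usage: cv::Mat p = (cv::Mat_<double>(2, 1) << 0, 0); |
 | |     //        cv::LMSolver::create(cv::makePtr<LineFitCB>(xs, ys), 100)->run(p); |
 | | @endcode |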
609 | | */ |
610 | | class CV_EXPORTS LMSolver : public Algorithm |
611 | | { |
612 | | public: |
613 | | class CV_EXPORTS Callback |
614 | | { |
615 | | public: |
616 | 0 | virtual ~Callback() {} |
617 | | /** |
618 | | computes error and Jacobian for the specified vector of parameters |
619 | | |
620 | | @param param the current vector of parameters |
621 | | @param err output vector of errors: err_i = actual_f_i - ideal_f_i |
622 | | @param J output Jacobian: J_ij = d(ideal_f_i)/d(param_j) |
623 | | |
624 | | when J=noArray(), it means that it does not need to be computed. |
625 | | Dimensionality of error vector and param vector can be different. |
626 | | The callback should explicitly allocate (with "create" method) each output array |
627 | | (unless it's noArray()). |
628 | | */ |
629 | | virtual bool compute(InputArray param, OutputArray err, OutputArray J) const = 0; |
630 | | }; |
631 | | |
632 | | /** |
633 | | Runs the Levenberg-Marquardt algorithm using the passed vector of parameters as the start point. |
634 | | The final vector of parameters (whether the algorithm converged or not) is stored in the same |
635 | | vector. The method returns the number of iterations used. If it is equal to the previously specified |
636 | | maxIters, there is a good chance the algorithm did not converge. |
637 | | |
638 | | @param param initial/final vector of parameters. |
639 | | |
640 | | Note that the dimensionality of parameter space is defined by the size of param vector, |
641 | | and the dimensionality of optimized criteria is defined by the size of err vector |
642 | | computed by the callback. |
643 | | */ |
644 | | virtual int run(InputOutputArray param) const = 0; |
645 | | |
646 | | /** |
647 | | Sets the maximum number of iterations |
648 | | @param maxIters the number of iterations |
649 | | */ |
650 | | virtual void setMaxIters(int maxIters) = 0; |
651 | | /** |
652 | | Retrieves the current maximum number of iterations |
653 | | */ |
654 | | virtual int getMaxIters() const = 0; |
655 | | |
656 | | /** |
657 | | Creates a Levenberg-Marquardt solver |
658 | | |
659 | | @param cb callback |
660 | | @param maxIters maximum number of iterations; it can be changed later |
661 | | using the setMaxIters() method. |
662 | | */ |
663 | | static Ptr<LMSolver> create(const Ptr<LMSolver::Callback>& cb, int maxIters); |
664 | | static Ptr<LMSolver> create(const Ptr<LMSolver::Callback>& cb, int maxIters, double eps); |
665 | | }; |
666 | | |
667 | | |
668 | | |
669 | | /** @example samples/cpp/tutorial_code/features2D/Homography/pose_from_homography.cpp |
670 | | An example program about pose estimation from coplanar points |
671 | | |
672 | | Check @ref tutorial_homography "the corresponding tutorial" for more details |
673 | | */ |
674 | | |
675 | | /** @brief Finds a perspective transformation between two planes. |
676 | | |
677 | | @param srcPoints Coordinates of the points in the original plane, a matrix of the type CV_32FC2 |
678 | | or vector\<Point2f\> . |
679 | | @param dstPoints Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or |
680 | | a vector\<Point2f\> . |
681 | | @param method Method used to compute a homography matrix. The following methods are possible: |
682 | | - **0** - a regular method using all the points, i.e., the least squares method |
683 | | - @ref RANSAC - RANSAC-based robust method |
684 | | - @ref LMEDS - Least-Median robust method |
685 | | - @ref RHO - PROSAC-based robust method |
686 | | @param ransacReprojThreshold Maximum allowed reprojection error to treat a point pair as an inlier |
687 | | (used in the RANSAC and RHO methods only). That is, if |
688 | | \f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} \cdot \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\f] |
689 | | then the point \f$i\f$ is considered as an outlier. If srcPoints and dstPoints are measured in pixels, |
690 | | it usually makes sense to set this parameter somewhere in the range of 1 to 10. |
691 | | @param mask Optional output mask set by a robust method ( RANSAC or LMeDS ). Note that the input |
692 | | mask values are ignored. |
693 | | @param maxIters The maximum number of RANSAC iterations. |
694 | | @param confidence Confidence level, between 0 and 1. |
695 | | |
696 | | The function finds and returns the perspective transformation \f$H\f$ between the source and the |
697 | | destination planes: |
698 | | |
699 | | \f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f] |
700 | | |
701 | | so that the back-projection error |
702 | | |
703 | | \f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f] |
704 | | |
705 | | is minimized. If the parameter method is set to the default value 0, the function uses all the point |
706 | | pairs to compute an initial homography estimate with a simple least-squares scheme. |
707 | | |
708 | | However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective |
709 | | transformation (that is, there are some outliers), this initial estimate will be poor. In this case, |
710 | | you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different |
711 | | random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix |
712 | | using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the |
713 | | computed homography (which is the number of inliers for RANSAC or the least median re-projection error for |
714 | | LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and |
715 | | the mask of inliers/outliers. |
716 | | |
717 | | Regardless of the method, robust or not, the computed homography matrix is refined further (using |
718 | | inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the |
719 | | re-projection error even more. |
720 | | |
721 | | The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to |
722 | | distinguish inliers from outliers. The method LMeDS does not need any threshold but it works |
723 | | correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the |
724 | | noise is rather small, use the default method (method=0). |
725 | | |
726 | | The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is |
727 | | determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an \f$H\f$ matrix |
728 | | cannot be estimated, an empty one will be returned. |
729 | | |
730 | | @sa |
731 | | getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective, |
732 | | perspectiveTransform |
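 | |
 | | A minimal usage sketch (point pairs are hypothetical): |
 | | @code |
 | |     std::vector<cv::Point2f> src = { {0,0}, {1,0}, {1,1}, {0,1} }; |
 | |     std::vector<cv::Point2f> dst = { {10,10}, {30,12}, {32,34}, {8,30} }; |
 | |     cv::Mat inlierMask; |
 | |     cv::Mat H = cv::findHomography(src, dst, cv::RANSAC, 3.0, inlierMask); |
 | |     // H is empty whenever the homography could not be estimated |
 | | @endcode |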
733 | | */ |
734 | | CV_EXPORTS_W Mat findHomography( InputArray srcPoints, InputArray dstPoints, |
735 | | int method = 0, double ransacReprojThreshold = 3, |
736 | | OutputArray mask=noArray(), const int maxIters = 2000, |
737 | | const double confidence = 0.995); |
738 | | |
739 | | /** @overload */ |
740 | | CV_EXPORTS Mat findHomography( InputArray srcPoints, InputArray dstPoints, |
741 | | OutputArray mask, int method = 0, double ransacReprojThreshold = 3 ); |
742 | | |
743 | | |
744 | | CV_EXPORTS_W Mat findHomography(InputArray srcPoints, InputArray dstPoints, OutputArray mask, |
745 | | const UsacParams ¶ms); |
746 | | |
747 | | /** @brief Computes an RQ decomposition of 3x3 matrices. |
748 | | |
749 | | @param src 3x3 input matrix. |
750 | | @param mtxR Output 3x3 upper-triangular matrix. |
751 | | @param mtxQ Output 3x3 orthogonal matrix. |
752 | | @param Qx Optional output 3x3 rotation matrix around x-axis. |
753 | | @param Qy Optional output 3x3 rotation matrix around y-axis. |
754 | | @param Qz Optional output 3x3 rotation matrix around z-axis. |
755 | | |
756 | | The function computes an RQ decomposition using Givens rotations. This function is used in |
757 | | #decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera |
758 | | and a rotation matrix. |
759 | | |
760 | | It optionally returns three rotation matrices, one for each axis, and the three Euler angles in |
761 | | degrees (as the return value) that could be used in OpenGL. Note, there is always more than one |
762 | | sequence of rotations about the three principal axes that results in the same orientation of an |
763 | | object, e.g. see @cite Slabaugh. The returned three rotation matrices and the corresponding three Euler angles |
764 | | are only one of the possible solutions. |
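 | |
 | | A minimal usage sketch (the identity is a hypothetical stand-in for the left 3x3 block of a projection matrix): |
 | | @code |
 | |     cv::Matx33d M = cv::Matx33d::eye(); |
 | |     cv::Mat mtxR, mtxQ; |
 | |     cv::Vec3d eulerDeg = cv::RQDecomp3x3(M, mtxR, mtxQ);   // Euler angles in degrees |
 | |     // M == mtxR * mtxQ, with mtxR upper-triangular and mtxQ orthogonal |
 | | @endcode |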
765 | | */ |
766 | | CV_EXPORTS_W Vec3d RQDecomp3x3( InputArray src, OutputArray mtxR, OutputArray mtxQ, |
767 | | OutputArray Qx = noArray(), |
768 | | OutputArray Qy = noArray(), |
769 | | OutputArray Qz = noArray()); |
770 | | |
771 | | /** @brief Decomposes a projection matrix into a rotation matrix and a camera intrinsic matrix. |
772 | | |
773 | | @param projMatrix 3x4 input projection matrix P. |
774 | | @param cameraMatrix Output 3x3 camera intrinsic matrix \f$\cameramatrix{A}\f$. |
775 | | @param rotMatrix Output 3x3 external rotation matrix R. |
776 | | @param transVect Output 4x1 translation vector T. |
777 | | @param rotMatrixX Optional 3x3 rotation matrix around x-axis. |
778 | | @param rotMatrixY Optional 3x3 rotation matrix around y-axis. |
779 | | @param rotMatrixZ Optional 3x3 rotation matrix around z-axis. |
780 | | @param eulerAngles Optional three-element vector containing three Euler angles of rotation in |
781 | | degrees. |
782 | | |
783 | | The function computes a decomposition of a projection matrix into a calibration and a rotation |
784 | | matrix and the position of a camera. |
785 | | |
786 | | It optionally returns three rotation matrices, one for each axis, and three Euler angles that could |
787 | | be used in OpenGL. Note, there is always more than one sequence of rotations about the three |
788 | | principal axes that results in the same orientation of an object, e.g. see @cite Slabaugh. The returned |
789 | | three rotation matrices and the corresponding three Euler angles are only one of the possible solutions. |
790 | | |
791 | | The function is based on #RQDecomp3x3 . |
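 | |
 | | A minimal usage sketch, with the hypothetical projection matrix \f$P = A [I|0]\f$: |
 | | @code |
 | |     cv::Mat P = (cv::Mat_<double>(3, 4) << 800, 0, 320, 0, |
 | |                                            0, 800, 240, 0, |
 | |                                            0,   0,   1, 0); |
 | |     cv::Mat K, R, t; |
 | |     cv::decomposeProjectionMatrix(P, K, R, t); |
 | |     // K recovers the intrinsics; here R is the identity and t = (0, 0, 0, 1)^T |
 | | @endcode |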
792 | | */ |
793 | | CV_EXPORTS_W void decomposeProjectionMatrix( InputArray projMatrix, OutputArray cameraMatrix, |
794 | | OutputArray rotMatrix, OutputArray transVect, |
795 | | OutputArray rotMatrixX = noArray(), |
796 | | OutputArray rotMatrixY = noArray(), |
797 | | OutputArray rotMatrixZ = noArray(), |
798 | | OutputArray eulerAngles = noArray() ); |
799 | | |
800 | | /** @brief Computes partial derivatives of the matrix product for each multiplied matrix. |
801 | | |
802 | | @param A First multiplied matrix. |
803 | | @param B Second multiplied matrix. |
804 | | @param dABdA First output derivative matrix d(A\*B)/dA of size |
805 | | \f$\texttt{A.rows*B.cols} \times {A.rows*A.cols}\f$ . |
806 | | @param dABdB Second output derivative matrix d(A\*B)/dB of size |
807 | | \f$\texttt{A.rows*B.cols} \times {B.rows*B.cols}\f$ . |
808 | | |
809 | | The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to |
810 | | the elements of each of the two input matrices. The function is used to compute the Jacobian |
811 | | matrices in #stereoCalibrate but can also be used in any other similar optimization function. |
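 | |
 | | A minimal usage sketch (hypothetical inputs): |
 | | @code |
 | |     cv::Mat A = cv::Mat::eye(3, 3, CV_64F); |
 | |     cv::Mat B = (cv::Mat_<double>(3, 1) << 1, 2, 3); |
 | |     cv::Mat dABdA, dABdB; |
 | |     cv::matMulDeriv(A, B, dABdA, dABdB); |
 | |     // dABdA: 3x9 = (A.rows*B.cols) x (A.rows*A.cols); dABdB: 3x3 |
 | | @endcode |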
812 | | */ |
813 | | CV_EXPORTS_W void matMulDeriv( InputArray A, InputArray B, OutputArray dABdA, OutputArray dABdB ); |
814 | | |
815 | | /** @brief Combines two rotation-and-shift transformations. |
816 | | |
817 | | @param rvec1 First rotation vector. |
818 | | @param tvec1 First translation vector. |
819 | | @param rvec2 Second rotation vector. |
820 | | @param tvec2 Second translation vector. |
821 | | @param rvec3 Output rotation vector of the superposition. |
822 | | @param tvec3 Output translation vector of the superposition. |
823 | | @param dr3dr1 Optional output derivative of rvec3 with regard to rvec1 |
824 | | @param dr3dt1 Optional output derivative of rvec3 with regard to tvec1 |
825 | | @param dr3dr2 Optional output derivative of rvec3 with regard to rvec2 |
826 | | @param dr3dt2 Optional output derivative of rvec3 with regard to tvec2 |
827 | | @param dt3dr1 Optional output derivative of tvec3 with regard to rvec1 |
828 | | @param dt3dt1 Optional output derivative of tvec3 with regard to tvec1 |
829 | | @param dt3dr2 Optional output derivative of tvec3 with regard to rvec2 |
830 | | @param dt3dt2 Optional output derivative of tvec3 with regard to tvec2 |
831 | | |
832 | | The function computes: |
833 | | |
834 | | \f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f] |
835 | | |
836 | | where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and |
837 | | \f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See #Rodrigues for details. |
838 | | |
839 | | Also, the function can compute the derivatives of the output vectors with regard to the input |
840 | | vectors (see #matMulDeriv ). The function is used inside #stereoCalibrate but can also be used in |
841 | | your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a |
842 | | function that contains a matrix multiplication. |
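 | |
 | | A minimal usage sketch (hypothetical transforms): |
 | | @code |
 | |     cv::Vec3d rvec1(0, 0, CV_PI/2), tvec1(1, 0, 0);   // first: 90 deg about z |
 | |     cv::Vec3d rvec2(0, 0, CV_PI/2), tvec2(0, 0, 0);   // second: another 90 deg |
 | |     cv::Mat rvec3, tvec3; |
 | |     cv::composeRT(rvec1, tvec1, rvec2, tvec2, rvec3, tvec3); |
 | |     // rvec3 ~ (0, 0, pi); tvec3 = rodrigues(rvec2)*tvec1 + tvec2 = (0, 1, 0) |
 | | @endcode |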
843 | | */ |
844 | | CV_EXPORTS_W void composeRT( InputArray rvec1, InputArray tvec1, |
845 | | InputArray rvec2, InputArray tvec2, |
846 | | OutputArray rvec3, OutputArray tvec3, |
847 | | OutputArray dr3dr1 = noArray(), OutputArray dr3dt1 = noArray(), |
848 | | OutputArray dr3dr2 = noArray(), OutputArray dr3dt2 = noArray(), |
849 | | OutputArray dt3dr1 = noArray(), OutputArray dt3dt1 = noArray(), |
850 | | OutputArray dt3dr2 = noArray(), OutputArray dt3dt2 = noArray() ); |
851 | | |
852 | | /** @brief Projects 3D points to an image plane. |
853 | | |
854 | | @param objectPoints Array of object points expressed wrt. the world coordinate frame. A 3xN/Nx3 |
855 | | 1-channel or 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is the number of points in the view. |
856 | | @param rvec The rotation vector (@ref Rodrigues) that, together with tvec, performs a change of |
857 | | basis from world to camera coordinate system, see @ref calibrateCamera for details. |
858 | | @param tvec The translation vector, see parameter description above. |
859 | | @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
860 | | @param distCoeffs Input vector of distortion coefficients |
861 | | \f$\distcoeffs\f$ . If the vector is empty, the zero distortion coefficients are assumed. |
862 | | @param imagePoints Output array of image points, 1xN/Nx1 2-channel, or |
863 | | vector\<Point2f\> . |
864 | | @param jacobian Optional output 2Nx(10+\<numDistCoeffs\>) jacobian matrix of derivatives of image |
865 | | points with respect to components of the rotation vector, translation vector, focal lengths, |
866 | | coordinates of the principal point and the distortion coefficients. In the old interface different |
867 | | components of the jacobian are returned via different output parameters. |
868 | | @param aspectRatio Optional "fixed aspect ratio" parameter. If the parameter is not 0, the |
869 | | function assumes that the aspect ratio (\f$f_x / f_y\f$) is fixed and correspondingly adjusts the |
870 | | jacobian matrix. |
871 | | |
872 | | The function computes the 2D projections of 3D points to the image plane, given intrinsic and |
873 | | extrinsic camera parameters. Optionally, the function computes Jacobians: matrices of partial |
874 | | derivatives of image points coordinates (as functions of all the input parameters) with respect to |
875 | | the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global |
876 | | optimization in @ref calibrateCamera, @ref solvePnP, and @ref stereoCalibrate. The function itself |
877 | | can also be used to compute a re-projection error, given the current intrinsic and extrinsic |
878 | | parameters. |
879 | | |
880 | | @note By setting rvec = tvec = \f$[0, 0, 0]\f$, or by setting cameraMatrix to a 3x3 identity matrix, |
881 | | or by passing zero distortion coefficients, one can get various useful partial cases of the |
882 | | function. This means, one can compute the distorted coordinates for a sparse set of points or apply |
883 | | a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup. |
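 | |
 | | A minimal usage sketch that also requests the Jacobian (all values hypothetical): |
 | | @code |
 | |     std::vector<cv::Point3d> obj = { {0.0, 0.0, 1.0} }; |
 | |     cv::Matx33d A(800, 0, 320, 0, 800, 240, 0, 0, 1); |
 | |     cv::Vec4d dist(0, 0, 0, 0);   // k1, k2, p1, p2 |
 | |     std::vector<cv::Point2d> img; |
 | |     cv::Mat jac; |
 | |     cv::projectPoints(obj, cv::Vec3d(0,0,0), cv::Vec3d(0,0,0), A, dist, img, jac); |
 | |     // jac is 2N x 14 here: rvec(3), tvec(3), f_x, f_y, c_x, c_y, 4 dist coeffs |
 | | @endcode |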
884 | | */ |
885 | | CV_EXPORTS_W void projectPoints( InputArray objectPoints, |
886 | | InputArray rvec, InputArray tvec, |
887 | | InputArray cameraMatrix, InputArray distCoeffs, |
888 | | OutputArray imagePoints, |
889 | | OutputArray jacobian = noArray(), |
890 | | double aspectRatio = 0 ); |
891 | | |
892 | | /** @example samples/cpp/tutorial_code/features2D/Homography/homography_from_camera_displacement.cpp |
893 | | An example program about homography from the camera displacement |
894 | | |
895 | | Check @ref tutorial_homography "the corresponding tutorial" for more details |
896 | | */ |
897 | | |
898 | | /** @brief Finds an object pose from 3D-2D point correspondences. |
899 | | |
900 | | @see @ref calib3d_solvePnP |
901 | | |
902 | | This function returns the rotation and the translation vectors that transform a 3D point expressed in the object |
903 | | coordinate frame to the camera coordinate frame, using different methods: |
904 | | - P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): need 4 input points to return a unique solution. |
905 | | - @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. |
906 | | - @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation. |
907 | | Number of input points must be 4. Object points must be defined in the following order: |
908 | | - point 0: [-squareLength / 2, squareLength / 2, 0] |
909 | | - point 1: [ squareLength / 2, squareLength / 2, 0] |
910 | | - point 2: [ squareLength / 2, -squareLength / 2, 0] |
911 | | - point 3: [-squareLength / 2, -squareLength / 2, 0] |
912 | | - for all the other flags, number of input points must be >= 4 and object points can be in any configuration. |
913 | | |
914 | | @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or |
915 | | 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here. |
916 | | @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, |
917 | | where N is the number of points. vector\<Point2d\> can be also passed here. |
918 | | @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
919 | | @param distCoeffs Input vector of distortion coefficients |
920 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
921 | | assumed. |
922 | | @param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from |
923 | | the model coordinate system to the camera coordinate system. |
924 | | @param tvec Output translation vector. |
925 | | @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses |
926 | | the provided rvec and tvec values as initial approximations of the rotation and translation |
927 | | vectors, respectively, and further optimizes them. |
928 | | @param flags Method for solving a PnP problem: see @ref calib3d_solvePnP_flags |
929 | | |
930 | | More information about Perspective-n-Points is described in @ref calib3d_solvePnP |
931 | | |
932 | | @note |
933 | | - An example of how to use solvePnP for planar augmented reality can be found at |
934 | | opencv_source_code/samples/python/plane_ar.py |
935 | | - If you are using Python: |
936 | | - Numpy array slices won't work as input because solvePnP requires contiguous |
937 | | arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of |
938 | | modules/calib3d/src/solvepnp.cpp version 2.4.9) |
939 | | - The P3P algorithm requires image points to be in an array of shape (N,1,2) due |
940 | | to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9) |
941 | | which requires 2-channel information. |
942 | | - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of |
943 | | it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints = |
944 | | np.ascontiguousarray(D[:,:2]).reshape((N,1,2)) |
945 | | - The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are |
946 | | unstable and sometimes give completely wrong results. If you pass one of these two |
947 | | flags, @ref SOLVEPNP_EPNP method will be used instead. |
948 | | - The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P |
949 | | methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions |
950 | | of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error). |
951 | | - With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points |
952 | | are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the |
953 | | global solution to converge. |
954 | | - With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar. |
955 | | - With @ref SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation. |
956 | | Number of input points must be 4. Object points must be defined in the following order: |
957 | | - point 0: [-squareLength / 2, squareLength / 2, 0] |
958 | | - point 1: [ squareLength / 2, squareLength / 2, 0] |
959 | | - point 2: [ squareLength / 2, -squareLength / 2, 0] |
960 | | - point 3: [-squareLength / 2, -squareLength / 2, 0] |
961 | | - With @ref SOLVEPNP_SQPNP input points must be >= 3 |
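 | |
 | | A minimal calling sketch (correspondences and intrinsics are hypothetical; the true pose is \f$R = I\f$, \f$t = (0, 0, 5)\f$): |
 | | @code |
 | |     std::vector<cv::Point3d> obj = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} }; |
 | |     std::vector<cv::Point2d> img = { {320,240}, {480,240}, {480,400}, {320,400} }; |
 | |     cv::Matx33d A(800, 0, 320, 0, 800, 240, 0, 0, 1); |
 | |     cv::Mat rvec, tvec; |
 | |     bool ok = cv::solvePnP(obj, img, A, cv::noArray(), rvec, tvec); |
 | |     // expected: rvec ~ (0, 0, 0), tvec ~ (0, 0, 5) |
 | | @endcode |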
962 | | */ |
963 | | CV_EXPORTS_W bool solvePnP( InputArray objectPoints, InputArray imagePoints, |
964 | | InputArray cameraMatrix, InputArray distCoeffs, |
965 | | OutputArray rvec, OutputArray tvec, |
966 | | bool useExtrinsicGuess = false, int flags = SOLVEPNP_ITERATIVE ); |
967 | | |
968 | | /** @brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme. |
969 | | |
970 | | @see @ref calib3d_solvePnP |
971 | | |
972 | | @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or |
973 | | 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can be also passed here. |
974 | | @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, |
975 | | where N is the number of points. vector\<Point2d\> can be also passed here. |
976 | | @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
977 | | @param distCoeffs Input vector of distortion coefficients |
978 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
979 | | assumed. |
980 | | @param rvec Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from |
981 | | the model coordinate system to the camera coordinate system. |
982 | | @param tvec Output translation vector. |
983 | | @param useExtrinsicGuess Parameter used for @ref SOLVEPNP_ITERATIVE. If true (1), the function uses |
984 | | the provided rvec and tvec values as initial approximations of the rotation and translation |
985 | | vectors, respectively, and further optimizes them. |
986 | | @param iterationsCount Number of iterations. |
987 | | @param reprojectionError Inlier threshold value used by the RANSAC procedure. The parameter value |
988 | | is the maximum allowed distance between the observed and computed point projections to consider it |
989 | | an inlier. |
990 | | @param confidence The probability that the algorithm produces a useful result. |
991 | | @param inliers Output vector that contains indices of inliers in objectPoints and imagePoints . |
992 | | @param flags Method for solving a PnP problem (see @ref solvePnP ). |
993 | | |
994 | | The function estimates an object pose given a set of object points, their corresponding image |
995 | | projections, as well as the camera intrinsic matrix and the distortion coefficients. This function finds such |
996 | | a pose that minimizes reprojection error, that is, the sum of squared distances between the observed |
997 | | projections imagePoints and the projected (using @ref projectPoints ) objectPoints. The use of RANSAC |
998 | | makes the function resistant to outliers. |
999 | | |
1000 | | @note |
1001 | | - An example of how to use solvePNPRansac for object detection can be found at |
1002 | | opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/ |
1003 | | - The default method used to estimate the camera pose for the Minimal Sample Sets step |
1004 | | is #SOLVEPNP_EPNP. Exceptions are: |
1005 | | - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used. |
1006 | | - if the number of input points is equal to 4, #SOLVEPNP_P3P is used. |
1007 | | - The method used to estimate the camera pose using all the inliers is defined by the |
1008 | | flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case, |
1009 | | the method #SOLVEPNP_EPNP will be used instead. |
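 | |
 | | A minimal calling sketch (same hypothetical correspondences as in the @ref solvePnP example, but tolerant to outliers): |
 | | @code |
 | |     std::vector<cv::Point3d> obj = { {0,0,0}, {1,0,0}, {1,1,0}, {0,1,0} }; |
 | |     std::vector<cv::Point2d> img = { {320,240}, {480,240}, {480,400}, {320,400} }; |
 | |     cv::Matx33d A(800, 0, 320, 0, 800, 240, 0, 0, 1); |
 | |     cv::Mat rvec, tvec, inliers; |
 | |     bool ok = cv::solvePnPRansac(obj, img, A, cv::noArray(), rvec, tvec, |
 | |                                  false, 100, 8.0f, 0.99, inliers); |
 | |     // inliers lists the indices of correspondences consistent with the pose |
 | | @endcode |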
1010 | | */ |
1011 | | CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints, |
1012 | | InputArray cameraMatrix, InputArray distCoeffs, |
1013 | | OutputArray rvec, OutputArray tvec, |
1014 | | bool useExtrinsicGuess = false, int iterationsCount = 100, |
1015 | | float reprojectionError = 8.0, double confidence = 0.99, |
1016 | | OutputArray inliers = noArray(), int flags = SOLVEPNP_ITERATIVE ); |
1017 | | |
1018 | | |
1019 | | /* |
1020 | | Finds the rotation and translation vectors. |
1021 | | If cameraMatrix is given, P3P is run; otherwise, linear P6P is run and the estimated cameraMatrix is also output. |
1022 | | */ |
1023 | | CV_EXPORTS_W bool solvePnPRansac( InputArray objectPoints, InputArray imagePoints, |
1024 | | InputOutputArray cameraMatrix, InputArray distCoeffs, |
1025 | | OutputArray rvec, OutputArray tvec, OutputArray inliers, |
1026 | | const UsacParams ¶ms=UsacParams()); |
1027 | | |
1028 | | /** @brief Finds an object pose from 3 3D-2D point correspondences. |
1029 | | |
1030 | | @see @ref calib3d_solvePnP |
1031 | | |
1032 | | @param objectPoints Array of object points in the object coordinate space, 3x3 1-channel or |
1033 | | 1x3/3x1 3-channel. vector\<Point3f\> can also be passed here.
1034 | | @param imagePoints Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
1035 | | vector\<Point2f\> can also be passed here.
1036 | | @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
1037 | | @param distCoeffs Input vector of distortion coefficients |
1038 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
1039 | | assumed. |
1040 | | @param rvecs Output rotation vectors (see @ref Rodrigues ) that, together with tvecs, bring points from
1041 | | the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions. |
1042 | | @param tvecs Output translation vectors. |
1043 | | @param flags Method for solving a P3P problem: |
1044 | | - @ref SOLVEPNP_P3P Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang |
1045 | | "Complete Solution Classification for the Perspective-Three-Point Problem" (@cite gao2003complete). |
1046 | | - @ref SOLVEPNP_AP3P Method is based on the paper of T. Ke and S. Roumeliotis. |
1047 | | "An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (@cite Ke17). |
1048 | | |
1049 | | The function estimates the object pose given 3 object points, their corresponding image |
1050 | | projections, as well as the camera intrinsic matrix and the distortion coefficients. |
1051 | | |
1052 | | @note |
1053 | | The solutions are sorted by reprojection errors (lowest to highest). |
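 | | 
 | | A minimal sketch (the correspondences and intrinsics below are placeholders for
 | | real detections):
 | | @code
 | |     vector<Point3f> objectPoints = { {0,0,0}, {1,0,0}, {0,1,0} }; // assumed model points
 | |     vector<Point2f> imagePoints  = { {320,240}, {400,240}, {320,160} }; // assumed detections
 | |     Mat cameraMatrix = (Mat_<double>(3,3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
 | |     vector<Mat> rvecs, tvecs;
 | |     int n = solveP3P(objectPoints, imagePoints, cameraMatrix, noArray(),
 | |                      rvecs, tvecs, SOLVEPNP_AP3P);
 | |     // n solutions (0..4) are returned, sorted by reprojection error
 | | @endcode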
1054 | | */ |
1055 | | CV_EXPORTS_W int solveP3P( InputArray objectPoints, InputArray imagePoints, |
1056 | | InputArray cameraMatrix, InputArray distCoeffs, |
1057 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
1058 | | int flags ); |
1059 | | |
1060 | | /** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame |
1061 | | to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
1062 | | |
1063 | | @see @ref calib3d_solvePnP |
1064 | | |
1065 | | @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel, |
1066 | | where N is the number of points. vector\<Point3d\> can also be passed here. |
1067 | | @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, |
1068 | | where N is the number of points. vector\<Point2d\> can also be passed here. |
1069 | | @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
1070 | | @param distCoeffs Input vector of distortion coefficients |
1071 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
1072 | | assumed. |
1073 | | @param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from |
1074 | | the model coordinate system to the camera coordinate system. Input values are used as an initial solution. |
1075 | | @param tvec Input/Output translation vector. Input values are used as an initial solution. |
1076 | | @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
1077 | | |
1078 | | The function refines the object pose given at least 3 object points, their corresponding image |
1079 | | projections, an initial solution for the rotation and translation vector, |
1080 | | as well as the camera intrinsic matrix and the distortion coefficients. |
1081 | | The function minimizes the projection error with respect to the rotation and the translation vectors, according |
1082 | | to a Levenberg-Marquardt iterative minimization @cite Madsen04 @cite Eade13 process. |
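 | | 
 | | A minimal sketch (assumes objectPoints, imagePoints, cameraMatrix and distCoeffs are
 | | already set up, and that rvec/tvec hold a coarse pose, e.g. from @ref solvePnPRansac):
 | | @code
 | |     // rvec and tvec are refined in place, starting from their current values
 | |     solvePnPRefineLM(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec,
 | |                      TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 50, FLT_EPSILON));
 | | @endcode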
1083 | | */ |
1084 | | CV_EXPORTS_W void solvePnPRefineLM( InputArray objectPoints, InputArray imagePoints, |
1085 | | InputArray cameraMatrix, InputArray distCoeffs, |
1086 | | InputOutputArray rvec, InputOutputArray tvec, |
1087 | | TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON)); |
1088 | | |
1089 | | /** @brief Refine a pose (the translation and the rotation that transform a 3D point expressed in the object coordinate frame |
1090 | | to the camera coordinate frame) from 3D-2D point correspondences, starting from an initial solution.
1091 | | |
1092 | | @see @ref calib3d_solvePnP |
1093 | | |
1094 | | @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or 1xN/Nx1 3-channel, |
1095 | | where N is the number of points. vector\<Point3d\> can also be passed here. |
1096 | | @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel, |
1097 | | where N is the number of points. vector\<Point2d\> can also be passed here. |
1098 | | @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
1099 | | @param distCoeffs Input vector of distortion coefficients |
1100 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
1101 | | assumed. |
1102 | | @param rvec Input/Output rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from |
1103 | | the model coordinate system to the camera coordinate system. Input values are used as an initial solution. |
1104 | | @param tvec Input/Output translation vector. Input values are used as an initial solution. |
1105 | | @param criteria Criteria when to stop the Levenberg-Marquardt iterative algorithm.
1106 | | @param VVSlambda Gain for the virtual visual servoing control law, equivalent to the \f$\alpha\f$ |
1107 | | gain in the Damped Gauss-Newton formulation. |
1108 | | |
1109 | | The function refines the object pose given at least 3 object points, their corresponding image |
1110 | | projections, an initial solution for the rotation and translation vector, |
1111 | | as well as the camera intrinsic matrix and the distortion coefficients. |
1112 | | The function minimizes the projection error with respect to the rotation and the translation vectors, using a |
1113 | | virtual visual servoing (VVS) @cite Chaumette06 @cite Marchand16 scheme. |
1114 | | */ |
1115 | | CV_EXPORTS_W void solvePnPRefineVVS( InputArray objectPoints, InputArray imagePoints, |
1116 | | InputArray cameraMatrix, InputArray distCoeffs, |
1117 | | InputOutputArray rvec, InputOutputArray tvec, |
1118 | | TermCriteria criteria = TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 20, FLT_EPSILON), |
1119 | | double VVSlambda = 1); |
1120 | | |
1121 | | /** @brief Finds an object pose from 3D-2D point correspondences. |
1122 | | |
1123 | | @see @ref calib3d_solvePnP |
1124 | | |
1125 | | This function returns a list of all the possible solutions (a solution is a <rotation vector, translation vector> |
1126 | | couple), depending on the number of input points and the chosen method: |
1127 | | - P3P methods (@ref SOLVEPNP_P3P, @ref SOLVEPNP_AP3P): 3 or 4 input points. Number of returned solutions can be between 0 and 4 with 3 input points. |
1128 | | - @ref SOLVEPNP_IPPE Input points must be >= 4 and object points must be coplanar. Returns 2 solutions. |
1129 | | - @ref SOLVEPNP_IPPE_SQUARE Special case suitable for marker pose estimation. |
1130 | | Number of input points must be 4 and 2 solutions are returned. Object points must be defined in the following order: |
1131 | | - point 0: [-squareLength / 2, squareLength / 2, 0] |
1132 | | - point 1: [ squareLength / 2, squareLength / 2, 0] |
1133 | | - point 2: [ squareLength / 2, -squareLength / 2, 0] |
1134 | | - point 3: [-squareLength / 2, -squareLength / 2, 0] |
1135 | | - for all the other flags, number of input points must be >= 4 and object points can be in any configuration. |
1136 | | Only 1 solution is returned. |
1137 | | |
1138 | | @param objectPoints Array of object points in the object coordinate space, Nx3 1-channel or |
1139 | | 1xN/Nx1 3-channel, where N is the number of points. vector\<Point3d\> can also be passed here.
1140 | | @param imagePoints Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
1141 | | where N is the number of points. vector\<Point2d\> can also be passed here.
1142 | | @param cameraMatrix Input camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
1143 | | @param distCoeffs Input vector of distortion coefficients |
1144 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
1145 | | assumed. |
1146 | | @param rvecs Vector of output rotation vectors (see @ref Rodrigues ) that, together with tvecs, bring points from
1147 | | the model coordinate system to the camera coordinate system. |
1148 | | @param tvecs Vector of output translation vectors. |
1149 | | @param useExtrinsicGuess Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses |
1150 | | the provided rvec and tvec values as initial approximations of the rotation and translation |
1151 | | vectors, respectively, and further optimizes them. |
1152 | | @param flags Method for solving a PnP problem: see @ref calib3d_solvePnP_flags |
1153 | | @param rvec Rotation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE |
1154 | | and useExtrinsicGuess is set to true. |
1155 | | @param tvec Translation vector used to initialize an iterative PnP refinement algorithm, when flag is @ref SOLVEPNP_ITERATIVE |
1156 | | and useExtrinsicGuess is set to true. |
1157 | | @param reprojectionError Optional vector of reprojection error, that is the RMS error |
1158 | | (\f$ \text{RMSE} = \sqrt{\frac{\sum_{i}^{N} \left ( \hat{y_i} - y_i \right )^2}{N}} \f$) between the input image points |
1159 | | and the 3D object points projected with the estimated pose. |
1160 | | |
1161 | | More information is described in @ref calib3d_solvePnP |
1162 | | |
1163 | | @note |
1164 | | - An example of how to use solvePnP for planar augmented reality can be found at |
1165 | | opencv_source_code/samples/python/plane_ar.py |
1166 | | - If you are using Python: |
1167 | | - Numpy array slices won't work as input because solvePnP requires contiguous |
1168 | | arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of |
1169 | | modules/calib3d/src/solvepnp.cpp version 2.4.9) |
1170 | | - The P3P algorithm requires image points to be in an array of shape (N,1,2) due |
1171 | | to its calling of #undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9) |
1172 | | which requires 2-channel information. |
1173 | | - Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of |
1174 | | it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints = |
1175 | | np.ascontiguousarray(D[:,:2]).reshape((N,1,2)) |
1176 | | - The methods @ref SOLVEPNP_DLS and @ref SOLVEPNP_UPNP cannot be used as the current implementations are |
1177 | | unstable and sometimes give completely wrong results. If you pass one of these two |
1178 | | flags, @ref SOLVEPNP_EPNP method will be used instead. |
1179 | | - The minimum number of points is 4 in the general case. In the case of @ref SOLVEPNP_P3P and @ref SOLVEPNP_AP3P |
1180 | | methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions |
1181 | | of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error). |
1182 | | - With @ref SOLVEPNP_ITERATIVE method and `useExtrinsicGuess=true`, the minimum number of points is 3 (3 points |
1183 | | are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the |
1184 | | global solution to converge. |
1185 | | - With @ref SOLVEPNP_IPPE input points must be >= 4 and object points must be coplanar. |
1186 | | - With @ref SOLVEPNP_IPPE_SQUARE this is a special case suitable for marker pose estimation. |
1187 | | Number of input points must be 4. Object points must be defined in the following order: |
1188 | | - point 0: [-squareLength / 2, squareLength / 2, 0] |
1189 | | - point 1: [ squareLength / 2, squareLength / 2, 0] |
1190 | | - point 2: [ squareLength / 2, -squareLength / 2, 0] |
1191 | | - point 3: [-squareLength / 2, -squareLength / 2, 0] |
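 | | 
 | | A minimal sketch for the @ref SOLVEPNP_IPPE_SQUARE case (the marker side length,
 | | intrinsics, and corner detections are assumed):
 | | @code
 | |     double squareLength = 0.05; // marker side length, assumed to be in meters
 | |     vector<Point3d> objectPoints = {
 | |         {-squareLength/2,  squareLength/2, 0},
 | |         { squareLength/2,  squareLength/2, 0},
 | |         { squareLength/2, -squareLength/2, 0},
 | |         {-squareLength/2, -squareLength/2, 0} };
 | |     vector<Point2d> imagePoints; // the 4 detected marker corners, in the same order
 | |     // ... fill imagePoints from a marker detector ...
 | |     vector<Mat> rvecs, tvecs;
 | |     Mat reprojErrors;
 | |     int n = solvePnPGeneric(objectPoints, imagePoints, cameraMatrix, distCoeffs,
 | |                             rvecs, tvecs, false, SOLVEPNP_IPPE_SQUARE,
 | |                             noArray(), noArray(), reprojErrors);
 | |     // n == 2 on success; solutions are sorted so rvecs[0]/tvecs[0] has the
 | |     // lowest reprojection error
 | | @endcode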
1192 | | */ |
1193 | | CV_EXPORTS_W int solvePnPGeneric( InputArray objectPoints, InputArray imagePoints, |
1194 | | InputArray cameraMatrix, InputArray distCoeffs, |
1195 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
1196 | | bool useExtrinsicGuess = false, SolvePnPMethod flags = SOLVEPNP_ITERATIVE, |
1197 | | InputArray rvec = noArray(), InputArray tvec = noArray(), |
1198 | | OutputArray reprojectionError = noArray() ); |
1199 | | |
1200 | | /** @brief Finds an initial camera intrinsic matrix from 3D-2D point correspondences. |
1201 | | |
1202 | | @param objectPoints Vector of vectors of the calibration pattern points in the calibration pattern |
1203 | | coordinate space. In the old interface all the per-view vectors are concatenated. See |
1204 | | #calibrateCamera for details. |
1205 | | @param imagePoints Vector of vectors of the projections of the calibration pattern points. In the |
1206 | | old interface all the per-view vectors are concatenated. |
1207 | | @param imageSize Image size in pixels used to initialize the principal point. |
1208 | | @param aspectRatio If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently. |
1209 | | Otherwise, \f$f_x = f_y \cdot \texttt{aspectRatio}\f$ . |
1210 | | |
1211 | | The function estimates and returns an initial camera intrinsic matrix for the camera calibration process. |
1212 | | Currently, the function only supports planar calibration patterns, which are patterns where each |
1213 | | object point has z-coordinate = 0.
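 | | 
 | | A minimal sketch (assumes per-view detections of a planar pattern have already been
 | | collected):
 | | @code
 | |     vector<vector<Point3f> > objectPoints; // one vector of pattern points per view (z = 0)
 | |     vector<vector<Point2f> > imagePoints;  // corresponding detections per view
 | |     // ... fill from several views, e.g. with findChessboardCorners ...
 | |     Size imageSize(640, 480); // assumed image resolution
 | |     Mat cameraMatrix = initCameraMatrix2D(objectPoints, imagePoints, imageSize);
 | | @endcode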
1214 | | */ |
1215 | | CV_EXPORTS_W Mat initCameraMatrix2D( InputArrayOfArrays objectPoints, |
1216 | | InputArrayOfArrays imagePoints, |
1217 | | Size imageSize, double aspectRatio = 1.0 ); |
1218 | | |
1219 | | /** @brief Finds the positions of internal corners of the chessboard. |
1220 | | |
1221 | | @param image Source chessboard view. It must be an 8-bit grayscale or color image. |
1222 | | @param patternSize Number of inner corners per a chessboard row and column |
1223 | | ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
1224 | | @param corners Output array of detected corners. |
1225 | | @param flags Various operation flags that can be zero or a combination of the following values: |
1226 | | - @ref CALIB_CB_ADAPTIVE_THRESH Use adaptive thresholding to convert the image to black |
1227 | | and white, rather than a fixed threshold level (computed from the average image brightness). |
1228 | | - @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with #equalizeHist before |
1229 | | applying fixed or adaptive thresholding. |
1230 | | - @ref CALIB_CB_FILTER_QUADS Use additional criteria (like contour area, perimeter, |
1231 | | square-like shape) to filter out false quads extracted at the contour retrieval stage. |
1232 | | - @ref CALIB_CB_FAST_CHECK Run a fast check on the image that looks for chessboard corners, |
1233 | | and shortcut the call if none is found. This can drastically speed up the call in the |
1234 | | degenerate condition when no chessboard is observed. |
1235 | | |
1236 | | The function attempts to determine whether the input image is a view of the chessboard pattern and |
1237 | | locate the internal chessboard corners. The function returns a non-zero value if all of the corners |
1238 | | are found and they are placed in a certain order (row by row, left to right in every row). |
1239 | | Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example, |
1240 | | a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black |
1241 | | squares touch each other. The detected coordinates are approximate, and to determine their positions |
1242 | | more accurately, the function calls #cornerSubPix. You also may use the function #cornerSubPix with |
1243 | | different parameters if returned coordinates are not accurate enough. |
1244 | | |
1245 | | Sample usage of detecting and drawing chessboard corners: : |
1246 | | @code |
1247 | | Size patternsize(8,6); //interior number of corners |
1248 | | Mat gray = ....; //source image |
1249 | | vector<Point2f> corners; //this will be filled by the detected corners |
1250 | | |
1251 | | //CALIB_CB_FAST_CHECK saves a lot of time on images |
1252 | | //that do not contain any chessboard corners |
1253 | | bool patternfound = findChessboardCorners(gray, patternsize, corners, |
1254 | | CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE |
1255 | | + CALIB_CB_FAST_CHECK); |
1256 | | |
1257 | | if(patternfound) |
1258 | | cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1), |
1259 | | TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
1260 | | |
1261 | | drawChessboardCorners(img, patternsize, Mat(corners), patternfound); |
1262 | | @endcode |
1263 | | @note The function requires white space (like a square-thick border, the wider the better) around |
1264 | | the board to make the detection more robust in various environments. Otherwise, if there is no |
1265 | | border and the background is dark, the outer black squares cannot be segmented properly and so the |
1266 | | square grouping and ordering algorithm fails. |
1267 | | |
1268 | | Use gen_pattern.py (@ref tutorial_camera_calibration_pattern) to create a checkerboard.
1269 | | */ |
1270 | | CV_EXPORTS_W bool findChessboardCorners( InputArray image, Size patternSize, OutputArray corners, |
1271 | | int flags = CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE ); |
1272 | | |
1273 | | /* |
1274 | | Checks whether the image contains a chessboard of the specified size.
1275 | | If so, a nonzero value is returned.
1276 | | */ |
1277 | | CV_EXPORTS_W bool checkChessboard(InputArray img, Size size); |
1278 | | |
1279 | | /** @brief Finds the positions of internal corners of the chessboard using a sector based approach. |
1280 | | |
1281 | | @param image Source chessboard view. It must be an 8-bit grayscale or color image. |
1282 | | @param patternSize Number of inner corners per a chessboard row and column |
1283 | | ( patternSize = cv::Size(points_per_row,points_per_column) = cv::Size(columns,rows) ).
1284 | | @param corners Output array of detected corners. |
1285 | | @param flags Various operation flags that can be zero or a combination of the following values: |
1286 | | - @ref CALIB_CB_NORMALIZE_IMAGE Normalize the image gamma with equalizeHist before detection. |
1287 | | - @ref CALIB_CB_EXHAUSTIVE Run an exhaustive search to improve detection rate. |
1288 | | - @ref CALIB_CB_ACCURACY Upsample the input image to improve sub-pixel accuracy degraded by aliasing effects.
1289 | | - @ref CALIB_CB_LARGER The detected pattern is allowed to be larger than patternSize (see description). |
1290 | | - @ref CALIB_CB_MARKER The detected pattern must have a marker (see description). |
1291 | | This should be used if an accurate camera calibration is required. |
1292 | | @param meta Optional output array of detected corners (CV_8UC1 and size = cv::Size(columns,rows)).
1293 | | Each entry stands for one corner of the pattern and can have one of the following values: |
1294 | | - 0 = no meta data attached |
1295 | | - 1 = left-top corner of a black cell |
1296 | | - 2 = left-top corner of a white cell |
1297 | | - 3 = left-top corner of a black cell with a white marker dot |
1298 | | - 4 = left-top corner of a white cell with a black marker dot (pattern origin in case of markers, otherwise first corner)
1299 | | |
1300 | | The function is analogous to #findChessboardCorners but uses a localized Radon
1301 | | transformation approximated by box filters, which makes it more robust to all sorts of
1302 | | noise and faster on larger images, and it is able to directly return the sub-pixel
1303 | | positions of the internal chessboard corners. The method is based on the paper
1304 | | @cite duda2018 "Accurate Detection and Localization of Checkerboard Corners for
1305 | | Calibration", which demonstrates that the returned sub-pixel positions are more
1306 | | accurate than those returned by cornerSubPix, allowing a precise camera
1307 | | calibration for demanding applications.
1308 | | |
1309 | | If the flags @ref CALIB_CB_LARGER or @ref CALIB_CB_MARKER are given,
1310 | | the result can be recovered from the optional meta array. Both flags are
1311 | | helpful when using calibration patterns that exceed the field of view of the camera.
1312 | | Such oversized patterns allow more accurate calibrations because corners as close
1313 | | as possible to the image borders can be utilized. For a
1314 | | consistent coordinate system across all images, the optional marker (see image
1315 | | below) can be used to move the origin of the board to the location of the
1316 | | black circle.
1317 | | |
1318 | | @note The function requires a white border with roughly the same width as one
1319 | | of the checkerboard fields around the whole board to improve the detection in
1320 | | various environments. In addition, because of the localized Radon
1321 | | transformation, it is beneficial to use round corners for the field corners
1322 | | which are located on the outside of the board. The following figure illustrates |
1323 | | a sample checkerboard optimized for the detection. However, any other checkerboard |
1324 | | can be used as well. |
1325 | | |
1326 | | Use gen_pattern.py (@ref tutorial_camera_calibration_pattern) to create a checkerboard.
1327 | |  |
1328 | | */ |
1329 | | CV_EXPORTS_AS(findChessboardCornersSBWithMeta) |
1330 | | bool findChessboardCornersSB(InputArray image,Size patternSize, OutputArray corners, |
1331 | | int flags,OutputArray meta); |
1332 | | /** @overload */ |
1333 | | CV_EXPORTS_W inline |
1334 | | bool findChessboardCornersSB(InputArray image, Size patternSize, OutputArray corners, |
1335 | | int flags = 0) |
1336 | 0 | { |
1337 | 0 | return findChessboardCornersSB(image, patternSize, corners, flags, noArray()); |
1338 | 0 | } |
1339 | | |
1340 | | /** @brief Estimates the sharpness of a detected chessboard. |
1341 | | |
1342 | | Image sharpness, as well as brightness, is a critical parameter for accurate
1343 | | camera calibration. To access these parameters for filtering out
1344 | | problematic calibration images, this method calculates edge profiles by traveling from
1345 | | black to white chessboard cell centers. Based on these profiles, the number of pixels
1346 | | required to transition from black to white is calculated. This width of the
1347 | | transition area is a good indication of how sharply the chessboard is imaged
1348 | | and should be below ~3.0 pixels.
1349 | | |
1350 | | @param image Gray image used to find chessboard corners |
1351 | | @param patternSize Size of a found chessboard pattern |
1352 | | @param corners Corners found by #findChessboardCornersSB |
1353 | | @param rise_distance Rise distance 0.8 means 10% ... 90% of the final signal strength |
1354 | | @param vertical By default edge responses for horizontal lines are calculated |
1355 | | @param sharpness Optional output array with a sharpness value for calculated edge responses (see description) |
1356 | | |
1357 | | The optional sharpness array is of type CV_32FC1 and has for each calculated |
1358 | | profile one row with the following five entries: |
1359 | | * 0 = x coordinate of the underlying edge in the image |
1360 | | * 1 = y coordinate of the underlying edge in the image |
1361 | | * 2 = width of the transition area (sharpness) |
1362 | | * 3 = signal strength in the black cell (min brightness) |
1363 | | * 4 = signal strength in the white cell (max brightness) |
1364 | | |
1365 | | @return Scalar(average sharpness, average min brightness, average max brightness,0) |
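 | | 
 | | A minimal sketch (reuses a gray image and pattern size from the detection step,
 | | which are assumed):
 | | @code
 | |     vector<Point2f> corners;
 | |     if (findChessboardCornersSB(gray, patternsize, corners))
 | |     {
 | |         Scalar s = estimateChessboardSharpness(gray, patternsize, corners);
 | |         // s[0]: average sharpness (ideally < ~3.0 px),
 | |         // s[1]/s[2]: average min/max brightness
 | |     }
 | | @endcode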
1366 | | */ |
1367 | | CV_EXPORTS_W Scalar estimateChessboardSharpness(InputArray image, Size patternSize, InputArray corners, |
1368 | | float rise_distance=0.8F,bool vertical=false, |
1369 | | OutputArray sharpness=noArray()); |
1370 | | |
1371 | | |
1372 | | //! finds subpixel-accurate positions of the chessboard corners |
1373 | | CV_EXPORTS_W bool find4QuadCornerSubpix( InputArray img, InputOutputArray corners, Size region_size ); |
1374 | | |
1375 | | /** @brief Renders the detected chessboard corners. |
1376 | | |
1377 | | @param image Destination image. It must be an 8-bit color image. |
1378 | | @param patternSize Number of inner corners per a chessboard row and column |
1379 | | (patternSize = cv::Size(points_per_row,points_per_column)). |
1380 | | @param corners Array of detected corners, the output of #findChessboardCorners. |
1381 | | @param patternWasFound Parameter indicating whether the complete board was found or not. The |
1382 | | return value of #findChessboardCorners should be passed here. |
1383 | | |
1384 | | The function draws individual chessboard corners detected either as red circles if the board was not |
1385 | | found, or as colored corners connected with lines if the board was found. |
1386 | | */ |
1387 | | CV_EXPORTS_W void drawChessboardCorners( InputOutputArray image, Size patternSize, |
1388 | | InputArray corners, bool patternWasFound ); |
1389 | | |
1390 | | /** @brief Draw axes of the world/object coordinate system from pose estimation. @sa solvePnP |
1391 | | |
1392 | | @param image Input/output image. It must have 1 or 3 channels. The number of channels is not altered. |
1393 | | @param cameraMatrix Input 3x3 floating-point matrix of camera intrinsic parameters. |
1394 | | \f$\cameramatrix{A}\f$ |
1395 | | @param distCoeffs Input vector of distortion coefficients |
1396 | | \f$\distcoeffs\f$. If the vector is empty, the zero distortion coefficients are assumed. |
1397 | | @param rvec Rotation vector (see @ref Rodrigues ) that, together with tvec, brings points from |
1398 | | the model coordinate system to the camera coordinate system. |
1399 | | @param tvec Translation vector. |
1400 | | @param length Length of the painted axes in the same unit as tvec (usually in meters).
1401 | | @param thickness Line thickness of the painted axes. |
1402 | | |
1403 | | This function draws the axes of the world/object coordinate system w.r.t. the camera frame.
1404 | | OX is drawn in red, OY in green and OZ in blue. |
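 | | 
 | | A minimal sketch (assumes a pose was obtained beforehand, e.g. with @ref solvePnP,
 | | and that tvec is expressed in meters):
 | | @code
 | |     Mat rvec, tvec;
 | |     solvePnP(objectPoints, imagePoints, cameraMatrix, distCoeffs, rvec, tvec);
 | |     drawFrameAxes(img, cameraMatrix, distCoeffs, rvec, tvec, 0.1f); // 10 cm axes
 | | @endcode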
1405 | | */ |
1406 | | CV_EXPORTS_W void drawFrameAxes(InputOutputArray image, InputArray cameraMatrix, InputArray distCoeffs, |
1407 | | InputArray rvec, InputArray tvec, float length, int thickness=3); |
1408 | | |
1409 | | struct CV_EXPORTS_W_SIMPLE CirclesGridFinderParameters |
1410 | | { |
1411 | | CV_WRAP CirclesGridFinderParameters(); |
1412 | | CV_PROP_RW cv::Size2f densityNeighborhoodSize; |
1413 | | CV_PROP_RW float minDensity; |
1414 | | CV_PROP_RW int kmeansAttempts; |
1415 | | CV_PROP_RW int minDistanceToAddKeypoint; |
1416 | | CV_PROP_RW int keypointScale; |
1417 | | CV_PROP_RW float minGraphConfidence; |
1418 | | CV_PROP_RW float vertexGain; |
1419 | | CV_PROP_RW float vertexPenalty; |
1420 | | CV_PROP_RW float existingVertexGain; |
1421 | | CV_PROP_RW float edgeGain; |
1422 | | CV_PROP_RW float edgePenalty; |
1423 | | CV_PROP_RW float convexHullFactor; |
1424 | | CV_PROP_RW float minRNGEdgeSwitchDist; |
1425 | | |
1426 | | enum GridType |
1427 | | { |
1428 | | SYMMETRIC_GRID, ASYMMETRIC_GRID |
1429 | | }; |
1430 | | GridType gridType; |
1431 | | |
1432 | | CV_PROP_RW float squareSize; //!< Distance between two adjacent points. Used by CALIB_CB_CLUSTERING. |
1433 | | CV_PROP_RW float maxRectifiedDistance; //!< Max deviation from prediction. Used by CALIB_CB_CLUSTERING. |
1434 | | }; |
1435 | | |
1436 | | #ifndef DISABLE_OPENCV_3_COMPATIBILITY |
1437 | | typedef CirclesGridFinderParameters CirclesGridFinderParameters2; |
1438 | | #endif |
1439 | | |
1440 | | /** @brief Finds centers in the grid of circles. |
1441 | | |
1442 | | @param image grid view of input circles; it must be an 8-bit grayscale or color image. |
1443 | | @param patternSize number of circles per row and column |
1444 | | ( patternSize = Size(points_per_row, points_per_column) ).
1445 | | @param centers output array of detected centers. |
1446 | | @param flags various operation flags that can be one of the following values: |
1447 | | - @ref CALIB_CB_SYMMETRIC_GRID uses symmetric pattern of circles. |
1448 | | - @ref CALIB_CB_ASYMMETRIC_GRID uses asymmetric pattern of circles. |
1449 | | - @ref CALIB_CB_CLUSTERING uses a special algorithm for grid detection. It is more robust to |
1450 | | perspective distortions but much more sensitive to background clutter. |
1451 | | @param blobDetector feature detector that finds blobs like dark circles on light background. |
1452 | | If `blobDetector` is NULL then `image` represents Point2f array of candidates. |
1453 | | @param parameters struct for finding circles in a grid pattern. |
1454 | | |
1455 | | The function attempts to determine whether the input image contains a grid of circles. If it does, the
1456 | | function locates the centers of the circles.
1457 | | have been found and they have been placed in a certain order (row by row, left to right in every |
1458 | | row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0. |
1459 | | |
1460 | | Sample usage of detecting and drawing the centers of circles: : |
1461 | | @code |
1462 | | Size patternsize(7,7); //number of centers |
1463 | | Mat gray = ...; //source image |
1464 | | vector<Point2f> centers; //this will be filled by the detected centers |
1465 | | |
1466 | | bool patternfound = findCirclesGrid(gray, patternsize, centers); |
1467 | | |
1468 | | drawChessboardCorners(img, patternsize, Mat(centers), patternfound); |
1469 | | @endcode |
1470 | | @note The function requires white space (like a square-thick border, the wider the better) around |
1471 | | the board to make the detection more robust in various environments. |
1472 | | */ |
1473 | | CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, |
1474 | | OutputArray centers, int flags, |
1475 | | const Ptr<FeatureDetector> &blobDetector, |
1476 | | const CirclesGridFinderParameters& parameters); |
1477 | | |
1478 | | /** @overload */ |
1479 | | CV_EXPORTS_W bool findCirclesGrid( InputArray image, Size patternSize, |
1480 | | OutputArray centers, int flags = CALIB_CB_SYMMETRIC_GRID, |
1481 | | const Ptr<FeatureDetector> &blobDetector = SimpleBlobDetector::create()); |
1482 | | |
1483 | | /** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration |
1484 | | pattern. |
1485 | | |
1486 | | @param objectPoints In the new interface it is a vector of vectors of calibration pattern points in |
1487 | | the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>). The outer |
1488 | | vector contains as many elements as the number of pattern views. If the same calibration pattern |
1489 | | is shown in each view and it is fully visible, all the vectors will be the same. However, it is
1490 | | possible to use partially occluded patterns or even different patterns in different views. Then, |
1491 | | the vectors will be different. Although the points are 3D, they all lie in the calibration pattern's |
1492 | | XY coordinate plane (thus 0 in the Z-coordinate), if the used calibration pattern is a planar rig. |
1493 | | In the old interface all the vectors of object points from different views are concatenated |
1494 | | together. |
1495 | | @param imagePoints In the new interface it is a vector of vectors of the projections of calibration |
1496 | | pattern points (e.g. std::vector<std::vector<cv::Vec2f>>). imagePoints.size() and |
1497 | | objectPoints.size(), and imagePoints[i].size() and objectPoints[i].size() for each i, must be equal, |
1498 | | respectively. In the old interface all the vectors of object points from different views are |
1499 | | concatenated together. |
1500 | | @param imageSize Size of the image used only to initialize the camera intrinsic matrix. |
1501 | | @param cameraMatrix Input/output 3x3 floating-point camera intrinsic matrix |
1502 | | \f$\cameramatrix{A}\f$ . If @ref CALIB_USE_INTRINSIC_GUESS |
1503 | | and/or @ref CALIB_FIX_ASPECT_RATIO, @ref CALIB_FIX_PRINCIPAL_POINT or @ref CALIB_FIX_FOCAL_LENGTH |
1504 | | are specified, some or all of fx, fy, cx, cy must be initialized before calling the function. |
1505 | | @param distCoeffs Input/output vector of distortion coefficients |
1506 | | \f$\distcoeffs\f$. |
1507 | | @param rvecs Output vector of rotation vectors (@ref Rodrigues ) estimated for each pattern view |
1508 | | (e.g. std::vector<cv::Mat>>). That is, each i-th rotation vector together with the corresponding |
1509 | | i-th translation vector (see the next output parameter description) brings the calibration pattern |
1510 | | from the object coordinate space (in which object points are specified) to the camera coordinate |
1511 | | space. In more technical terms, the tuple of the i-th rotation and translation vector performs |
1512 | | a change of basis from object coordinate space to camera coordinate space. Due to its duality, this |
1513 | | tuple is equivalent to the position of the calibration pattern with respect to the camera coordinate |
1514 | | space. |
1515 | | @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter |
1516 | | description above.
1517 | | @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic |
1518 | | parameters. Order of deviations values: |
1519 | | \f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3, |
1520 | | s_4, \tau_x, \tau_y)\f$. If one of the parameters is not estimated, its deviation is equal to zero.
1521 | | @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic |
1522 | | parameters. Order of deviations values: \f$(R_0, T_0, \dotsc , R_{M - 1}, T_{M - 1})\f$ where M is |
1523 | | the number of pattern views. \f$R_i, T_i\f$ are concatenated 1x3 vectors. |
1524 | | @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view. |
1525 | | @param flags Different flags that may be zero or a combination of the following values: |
1526 | | - @ref CALIB_USE_INTRINSIC_GUESS cameraMatrix contains valid initial values of |
1527 | | fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image |
1528 | | center ( imageSize is used), and focal distances are computed in a least-squares fashion. |
1529 | | Note, that if intrinsic parameters are known, there is no need to use this function just to |
1530 | | estimate extrinsic parameters. Use @ref solvePnP instead. |
1531 | | - @ref CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global |
1532 | | optimization. It stays at the center or at a different location specified when |
1533 | | @ref CALIB_USE_INTRINSIC_GUESS is set too. |
1534 | | - @ref CALIB_FIX_ASPECT_RATIO The function considers only fy as a free parameter. The
1535 | | ratio fx/fy stays the same as in the input cameraMatrix . When |
1536 | | @ref CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are |
1537 | | ignored, only their ratio is computed and used further. |
1538 | | - @ref CALIB_ZERO_TANGENT_DIST Tangential distortion coefficients \f$(p_1, p_2)\f$ are set |
1539 | | to zeros and stay zero. |
1540 | | - @ref CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global optimization if |
1541 | | @ref CALIB_USE_INTRINSIC_GUESS is set. |
1542 | | - @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 The corresponding radial distortion |
1543 | | coefficient is not changed during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is |
1544 | | set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0. |
1545 | | - @ref CALIB_RATIONAL_MODEL Coefficients k4, k5, and k6 are enabled. To provide the |
1546 | | backward compatibility, this extra flag should be explicitly specified to make the |
1547 | | calibration function use the rational model and return 8 coefficients or more. |
1548 | | - @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the |
1549 | | backward compatibility, this extra flag should be explicitly specified to make the |
1550 | | calibration function use the thin prism model and return 12 coefficients or more. |
1551 | | - @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during |
1552 | | the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the |
1553 | | supplied distCoeffs matrix is used. Otherwise, it is set to 0. |
1554 | | - @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the |
1555 | | backward compatibility, this extra flag should be explicitly specified to make the |
1556 | | calibration function use the tilted sensor model and return 14 coefficients. |
1557 | | - @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during |
1558 | | the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the |
1559 | | supplied distCoeffs matrix is used. Otherwise, it is set to 0. |
1560 | | @param criteria Termination criteria for the iterative optimization algorithm. |
1561 | | |
1562 | | @return the overall RMS re-projection error. |
1563 | | |
1564 | | The function estimates the intrinsic camera parameters and extrinsic parameters for each of the |
1565 | | views. The algorithm is based on @cite Zhang2000 and @cite BouguetMCT . The coordinates of 3D object |
1566 | | points and their corresponding 2D projections in each view must be specified. That may be achieved |
1567 | | by using an object with known geometry and easily detectable feature points. Such an object is |
1568 | | called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as |
1569 | | a calibration rig (see @ref findChessboardCorners). Currently, initialization of intrinsic |
1570 | | parameters (when @ref CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration |
1571 | | patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also |
1572 | | be used as long as initial cameraMatrix is provided. |
1573 | | |
1574 | | The algorithm performs the following steps: |
1575 | | |
1576 | | - Compute the initial intrinsic parameters (the option only available for planar calibration |
1577 | | patterns) or read them from the input parameters. The distortion coefficients are all set to |
1578 | | zeros initially unless some of CALIB_FIX_K? are specified. |
1579 | | |
1580 | | - Estimate the initial camera pose as if the intrinsic parameters have been already known. This is |
1581 | | done using @ref solvePnP . |
1582 | | |
1583 | | - Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error, |
1584 | | that is, the total sum of squared distances between the observed feature points imagePoints and |
1585 | | the projected (using the current estimates for camera parameters and the poses) object points |
1586 | | objectPoints. See @ref projectPoints for details. |
1587 | | |
1588 | | @note |
1589 | | If you use a non-square (i.e. non-N-by-N) grid and @ref findChessboardCorners for calibration, |
1590 | | and @ref calibrateCamera returns bad values (zero distortion coefficients, \f$c_x\f$ and |
1591 | | \f$c_y\f$ very far from the image center, and/or large differences between \f$f_x\f$ and |
1592 | | \f$f_y\f$ (ratios of 10:1 or more)), then you are probably using patternSize=cvSize(rows,cols) |
1593 | | instead of using patternSize=cvSize(cols,rows) in @ref findChessboardCorners. |
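 | | 
 | | A condensed calibration loop (an illustrative sketch; the image list, board
 | | geometry and square size are assumed):
 | | @code
 | |     Size patternsize(9, 6);        // inner corners of the board, assumed
 | |     float squareSize = 0.025f;     // square side length, assumed to be in meters
 | |     vector<Point3f> board;
 | |     for (int i = 0; i < patternsize.height; i++)
 | |         for (int j = 0; j < patternsize.width; j++)
 | |             board.push_back(Point3f(j*squareSize, i*squareSize, 0));
 | | 
 | |     vector<String> imageNames;     // hypothetical list of calibration image paths
 | |     vector<vector<Point3f> > objectPoints;
 | |     vector<vector<Point2f> > imagePoints;
 | |     Size imageSize;
 | |     for (const String& name : imageNames)
 | |     {
 | |         Mat gray = imread(name, IMREAD_GRAYSCALE);
 | |         imageSize = gray.size();
 | |         vector<Point2f> corners;
 | |         if (findChessboardCorners(gray, patternsize, corners))
 | |         {
 | |             cornerSubPix(gray, corners, Size(11,11), Size(-1,-1),
 | |                          TermCriteria(TermCriteria::EPS + TermCriteria::COUNT, 30, 0.1));
 | |             imagePoints.push_back(corners);
 | |             objectPoints.push_back(board);
 | |         }
 | |     }
 | | 
 | |     Mat cameraMatrix, distCoeffs;
 | |     vector<Mat> rvecs, tvecs;
 | |     double rms = calibrateCamera(objectPoints, imagePoints, imageSize,
 | |                                  cameraMatrix, distCoeffs, rvecs, tvecs);
 | |     // rms is the overall RMS re-projection error in pixels
 | | @endcode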
1594 | | |
1595 | | @sa |
1596 | | calibrateCameraRO, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, |
1597 | | undistort |
1598 | | */ |
1599 | | CV_EXPORTS_AS(calibrateCameraExtended) double calibrateCamera( InputArrayOfArrays objectPoints, |
1600 | | InputArrayOfArrays imagePoints, Size imageSize, |
1601 | | InputOutputArray cameraMatrix, InputOutputArray distCoeffs, |
1602 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
1603 | | OutputArray stdDeviationsIntrinsics, |
1604 | | OutputArray stdDeviationsExtrinsics, |
1605 | | OutputArray perViewErrors, |
1606 | | int flags = 0, TermCriteria criteria = TermCriteria( |
1607 | | TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ); |
1608 | | |
1609 | | /** @overload */ |
1610 | | CV_EXPORTS_W double calibrateCamera( InputArrayOfArrays objectPoints, |
1611 | | InputArrayOfArrays imagePoints, Size imageSize, |
1612 | | InputOutputArray cameraMatrix, InputOutputArray distCoeffs, |
1613 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
1614 | | int flags = 0, TermCriteria criteria = TermCriteria( |
1615 | | TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ); |
1616 | | |
1617 | | /** @brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern. |
1618 | | |
1619 | | This function is an extension of #calibrateCamera with the method of releasing object which was |
1620 | | proposed in @cite strobl2011iccv. In many common cases with inaccurate, unmeasured, roughly planar |
1621 | | targets (calibration plates), this method can dramatically improve the precision of the estimated |
1622 | | camera parameters. Both the object-releasing method and standard method are supported by this |
1623 | | function. Use the parameter **iFixedPoint** for method selection. In the internal implementation, |
1624 | | #calibrateCamera is a wrapper for this function. |
1625 | | |
1626 | | @param objectPoints Vector of vectors of calibration pattern points in the calibration pattern |
1627 | | coordinate space. See #calibrateCamera for details. If the object-releasing method is to be used,
1628 | | the identical calibration board must be used in each view and it must be fully visible, and all
1629 | | objectPoints[i] must be the same, with all points roughly close to a plane. **The calibration
1630 | | target has to be rigid, or at least static if the camera (rather than the calibration target) is |
1631 | | shifted for grabbing images.** |
1632 | | @param imagePoints Vector of vectors of the projections of calibration pattern points. See |
1633 | | #calibrateCamera for details. |
1634 | | @param imageSize Size of the image used only to initialize the intrinsic camera matrix. |
1635 | | @param iFixedPoint The index of the 3D object point in objectPoints[0] to be fixed. It also acts as |
1636 | | a switch for calibration method selection. If the object-releasing method is to be used, pass the
1637 | | parameter in the range of [1, objectPoints[0].size()-2]; otherwise, a value out of this range
1638 | | selects the standard calibration method. Usually the top-right corner point of the calibration
1639 | | board grid is recommended to be fixed when the object-releasing method is utilized. According to
1640 | | \cite strobl2011iccv, two other points are also fixed. In this implementation, objectPoints[0].front |
1641 | | and objectPoints[0].back.z are used. With the object-releasing method, accurate rvecs, tvecs and
1642 | | newObjPoints are only possible if coordinates of these three fixed points are accurate enough. |
1643 | | @param cameraMatrix Output 3x3 floating-point camera matrix. See #calibrateCamera for details. |
1644 | | @param distCoeffs Output vector of distortion coefficients. See #calibrateCamera for details. |
1645 | | @param rvecs Output vector of rotation vectors estimated for each pattern view. See #calibrateCamera |
1646 | | for details. |
1647 | | @param tvecs Output vector of translation vectors estimated for each pattern view. |
1648 | | @param newObjPoints The updated output vector of calibration pattern points. The coordinates might |
1649 | | be scaled based on three fixed points. The returned coordinates are accurate only if the above |
1650 | | mentioned three fixed points are accurate. If not needed, noArray() can be passed in. This parameter |
1651 | | is ignored with standard calibration method. |
1652 | | @param stdDeviationsIntrinsics Output vector of standard deviations estimated for intrinsic parameters. |
1653 | | See #calibrateCamera for details. |
1654 | | @param stdDeviationsExtrinsics Output vector of standard deviations estimated for extrinsic parameters. |
1655 | | See #calibrateCamera for details. |
1656 | | @param stdDeviationsObjPoints Output vector of standard deviations estimated for refined coordinates |
1657 | | of calibration pattern points. It has the same size and order as objectPoints[0] vector. This |
1658 | | parameter is ignored with standard calibration method. |
1659 | | @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view. |
1660 | | @param flags Different flags that may be zero or a combination of some predefined values. See |
1661 | | #calibrateCamera for details. If the object-releasing method is used, the calibration time may
1662 | | be much longer. CALIB_USE_QR or CALIB_USE_LU could be used for faster calibration, at the cost of
1663 | | results that are potentially less precise and less stable in some rare cases.
1664 | | @param criteria Termination criteria for the iterative optimization algorithm. |
1665 | | |
1666 | | @return the overall RMS re-projection error. |
1667 | | |
1668 | | The function estimates the intrinsic camera parameters and extrinsic parameters for each of the |
1669 | | views. The algorithm is based on @cite Zhang2000, @cite BouguetMCT and @cite strobl2011iccv. See |
1670 | | #calibrateCamera for other detailed explanations. |
1671 | | @sa |
1672 | | calibrateCamera, findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort |
1673 | | */ |
1674 | | CV_EXPORTS_AS(calibrateCameraROExtended) double calibrateCameraRO( InputArrayOfArrays objectPoints, |
1675 | | InputArrayOfArrays imagePoints, Size imageSize, int iFixedPoint, |
1676 | | InputOutputArray cameraMatrix, InputOutputArray distCoeffs, |
1677 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
1678 | | OutputArray newObjPoints, |
1679 | | OutputArray stdDeviationsIntrinsics, |
1680 | | OutputArray stdDeviationsExtrinsics, |
1681 | | OutputArray stdDeviationsObjPoints, |
1682 | | OutputArray perViewErrors, |
1683 | | int flags = 0, TermCriteria criteria = TermCriteria( |
1684 | | TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ); |
1685 | | |
1686 | | /** @overload */ |
1687 | | CV_EXPORTS_W double calibrateCameraRO( InputArrayOfArrays objectPoints, |
1688 | | InputArrayOfArrays imagePoints, Size imageSize, int iFixedPoint, |
1689 | | InputOutputArray cameraMatrix, InputOutputArray distCoeffs, |
1690 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, |
1691 | | OutputArray newObjPoints, |
1692 | | int flags = 0, TermCriteria criteria = TermCriteria( |
1693 | | TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) ); |
1694 | | |
1695 | | /** @brief Computes useful camera characteristics from the camera intrinsic matrix. |
1696 | | |
1697 | | @param cameraMatrix Input camera intrinsic matrix that can be estimated by #calibrateCamera or |
1698 | | #stereoCalibrate . |
1699 | | @param imageSize Input image size in pixels. |
1700 | | @param apertureWidth Physical width in mm of the sensor. |
1701 | | @param apertureHeight Physical height in mm of the sensor. |
1702 | | @param fovx Output field of view in degrees along the horizontal sensor axis. |
1703 | | @param fovy Output field of view in degrees along the vertical sensor axis. |
1704 | | @param focalLength Focal length of the lens in mm. |
1705 | | @param principalPoint Principal point in mm. |
1706 | | @param aspectRatio \f$f_y/f_x\f$ |
1707 | | |
1708 | | The function computes various useful camera characteristics from the previously estimated camera |
1709 | | matrix. |
1710 | | |
1711 | | @note |
1712 | | Do keep in mind that the unit 'mm' stands for whatever unit of measure one chooses for
1713 | | the chessboard pitch (it can thus be any value). |
1714 | | */ |
1715 | | CV_EXPORTS_W void calibrationMatrixValues( InputArray cameraMatrix, Size imageSize, |
1716 | | double apertureWidth, double apertureHeight, |
1717 | | CV_OUT double& fovx, CV_OUT double& fovy, |
1718 | | CV_OUT double& focalLength, CV_OUT Point2d& principalPoint, |
1719 | | CV_OUT double& aspectRatio ); |
1720 | | |
1721 | | /** @brief Calibrates a stereo camera setup. This function finds the intrinsic parameters
1722 | | for each of the two cameras and the extrinsic parameters between the two cameras. |
1723 | | |
1724 | | @param objectPoints Vector of vectors of the calibration pattern points. The same structure as |
1725 | | in @ref calibrateCamera. For each pattern view, both cameras need to see the same object |
1726 | | points. Therefore, objectPoints.size(), imagePoints1.size(), and imagePoints2.size() need to be |
1727 | | equal as well as objectPoints[i].size(), imagePoints1[i].size(), and imagePoints2[i].size() need to |
1728 | | be equal for each i. |
1729 | | @param imagePoints1 Vector of vectors of the projections of the calibration pattern points, |
1730 | | observed by the first camera. The same structure as in @ref calibrateCamera. |
1731 | | @param imagePoints2 Vector of vectors of the projections of the calibration pattern points, |
1732 | | observed by the second camera. The same structure as in @ref calibrateCamera. |
1733 | | @param cameraMatrix1 Input/output camera intrinsic matrix for the first camera, the same as in |
1734 | | @ref calibrateCamera. Furthermore, for the stereo case, additional flags may be used, see below. |
1735 | | @param distCoeffs1 Input/output vector of distortion coefficients, the same as in |
1736 | | @ref calibrateCamera. |
1737 | | @param cameraMatrix2 Input/output camera intrinsic matrix for the second camera. See description for
1738 | | cameraMatrix1. |
1739 | | @param distCoeffs2 Input/output lens distortion coefficients for the second camera. See |
1740 | | description for distCoeffs1. |
1741 | | @param imageSize Size of the image used only to initialize the camera intrinsic matrices. |
1742 | | @param R Output rotation matrix. Together with the translation vector T, this matrix brings |
1743 | | points given in the first camera's coordinate system to points in the second camera's |
1744 | | coordinate system. In more technical terms, the tuple of R and T performs a change of basis |
1745 | | from the first camera's coordinate system to the second camera's coordinate system. Due to its |
1746 | | duality, this tuple is equivalent to the position of the first camera with respect to the |
1747 | | second camera coordinate system. |
1748 | | @param T Output translation vector, see description above. |
1749 | | @param E Output essential matrix. |
1750 | | @param F Output fundamental matrix. |
1751 | | @param rvecs Output vector of rotation vectors ( @ref Rodrigues ) estimated for each pattern view in the |
1752 | | coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each |
1753 | | i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter |
1754 | | description) brings the calibration pattern from the object coordinate space (in which object points are |
1755 | | specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms, |
1756 | | the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space |
1757 | | to camera coordinate space of the first camera of the stereo pair. |
1758 | | @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description |
1759 | | of previous output parameter ( rvecs ). |
1760 | | @param perViewErrors Output vector of the RMS re-projection error estimated for each pattern view. |
1761 | | @param flags Different flags that may be zero or a combination of the following values: |
1762 | | - @ref CALIB_FIX_INTRINSIC Fix cameraMatrix? and distCoeffs? so that only R, T, E, and F |
1763 | | matrices are estimated. |
1764 | | - @ref CALIB_USE_INTRINSIC_GUESS Optimize some or all of the intrinsic parameters |
1765 | | according to the specified flags. Initial values are provided by the user. |
1766 | | - @ref CALIB_USE_EXTRINSIC_GUESS R and T contain valid initial values that are optimized further. |
1767 | | Otherwise R and T are initialized to the median value of the pattern views (each dimension separately). |
1768 | | - @ref CALIB_FIX_PRINCIPAL_POINT Fix the principal points during the optimization. |
1769 | | - @ref CALIB_FIX_FOCAL_LENGTH Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ . |
1770 | | - @ref CALIB_FIX_ASPECT_RATIO Optimize \f$f^{(j)}_y\f$. Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$.
1772 | | - @ref CALIB_SAME_FOCAL_LENGTH Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ . |
1773 | | - @ref CALIB_ZERO_TANGENT_DIST Set tangential distortion coefficients for each camera to |
1774 | | zeros and keep them fixed there.
1775 | | - @ref CALIB_FIX_K1,..., @ref CALIB_FIX_K6 Do not change the corresponding radial |
1776 | | distortion coefficient during the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, |
1777 | | the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0. |
1778 | | - @ref CALIB_RATIONAL_MODEL Enable coefficients k4, k5, and k6. To provide the backward |
1779 | | compatibility, this extra flag should be explicitly specified to make the calibration |
1780 | | function use the rational model and return 8 coefficients. If the flag is not set, the |
1781 | | function computes and returns only 5 distortion coefficients. |
1782 | | - @ref CALIB_THIN_PRISM_MODEL Coefficients s1, s2, s3 and s4 are enabled. To provide the |
1783 | | backward compatibility, this extra flag should be explicitly specified to make the |
1784 | | calibration function use the thin prism model and return 12 coefficients. If the flag is not |
1785 | | set, the function computes and returns only 5 distortion coefficients. |
1786 | | - @ref CALIB_FIX_S1_S2_S3_S4 The thin prism distortion coefficients are not changed during |
1787 | | the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the |
1788 | | supplied distCoeffs matrix is used. Otherwise, it is set to 0. |
1789 | | - @ref CALIB_TILTED_MODEL Coefficients tauX and tauY are enabled. To provide the |
1790 | | backward compatibility, this extra flag should be explicitly specified to make the |
1791 | | calibration function use the tilted sensor model and return 14 coefficients. If the flag is not |
1792 | | set, the function computes and returns only 5 distortion coefficients. |
1793 | | - @ref CALIB_FIX_TAUX_TAUY The coefficients of the tilted sensor model are not changed during |
1794 | | the optimization. If @ref CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the |
1795 | | supplied distCoeffs matrix is used. Otherwise, it is set to 0. |
1796 | | @param criteria Termination criteria for the iterative optimization algorithm. |
1797 | | |
1798 | | The function estimates the transformation between two cameras making a stereo pair. If one computes |
1799 | | the poses of an object relative to the first camera and to the second camera, |
1800 | | ( \f$R_1\f$,\f$T_1\f$ ) and (\f$R_2\f$,\f$T_2\f$), respectively, for a stereo camera where the |
1801 | | relative position and orientation between the two cameras are fixed, then those poses definitely |
1802 | | relate to each other. This means, if the relative position and orientation (\f$R\f$,\f$T\f$) of the |
1803 | | two cameras is known, it is possible to compute (\f$R_2\f$,\f$T_2\f$) when (\f$R_1\f$,\f$T_1\f$) is |
1804 | | given. This is what the described function does. It computes (\f$R\f$,\f$T\f$) such that: |
1805 | | |
1806 | | \f[R_2=R R_1\f] |
1807 | | \f[T_2=R T_1 + T.\f] |
1808 | | |
1809 | | Therefore, one can compute the coordinate representation of a 3D point for the second camera's |
1810 | | coordinate system when given the point's coordinate representation in the first camera's coordinate |
1811 | | system: |
1812 | | |
1813 | | \f[\begin{bmatrix} |
1814 | | X_2 \\ |
1815 | | Y_2 \\ |
1816 | | Z_2 \\ |
1817 | | 1 |
1818 | | \end{bmatrix} = \begin{bmatrix} |
1819 | | R & T \\ |
1820 | | 0 & 1 |
1821 | | \end{bmatrix} \begin{bmatrix} |
1822 | | X_1 \\ |
1823 | | Y_1 \\ |
1824 | | Z_1 \\ |
1825 | | 1 |
1826 | | \end{bmatrix}.\f] |
1827 | | |
1828 | | |
1829 | | Optionally, it computes the essential matrix E: |
1830 | | |
1831 | | \f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} R\f] |
1832 | | |
1833 | | where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ . |
1834 | | The function can also compute the fundamental matrix F:
1835 | | |
1836 | | \f[F = cameraMatrix2^{-T}\cdot E \cdot cameraMatrix1^{-1}\f] |
1837 | | |
1838 | | Besides the stereo-related information, the function can also perform a full calibration of each of |
1839 | | the two cameras. However, due to the high dimensionality of the parameter space and noise in the |
1840 | | input data, the function can diverge from the correct solution. If the intrinsic parameters can be |
1841 | | estimated with high accuracy for each of the cameras individually (for example, using |
1842 | | #calibrateCamera ), it is recommended to do so and then pass the @ref CALIB_FIX_INTRINSIC flag to
1843 | | the function along with the computed intrinsic parameters. Otherwise, if all the parameters are
1844 | | estimated at once, it makes sense to restrict some parameters, for example, pass |
1845 | | @ref CALIB_SAME_FOCAL_LENGTH and @ref CALIB_ZERO_TANGENT_DIST flags, which is usually a |
1846 | | reasonable assumption. |
1847 | | |
1848 | | Similarly to #calibrateCamera, the function minimizes the total re-projection error for all the |
1849 | | points in all the available views from both cameras. The function returns the final value of the |
1850 | | re-projection error. |
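 | | 
 | | A minimal calling sketch (illustrative only; objectPoints, imagePoints1, imagePoints2 and
 | | imageSize are assumed to be filled from a prior corner-detection step, and K1, D1, K2, D2 are
 | | hypothetical names for intrinsics estimated beforehand with #calibrateCamera):
 | | @code
 | | vector<vector<Point3f> > objectPoints;                // pattern points, one vector per view
 | | vector<vector<Point2f> > imagePoints1, imagePoints2;  // detected corners, per view and camera
 | | Size imageSize;                                       // size of the calibration images
 | | // ... fill the point vectors and imageSize here ...
 | | 
 | | Mat K1, D1, K2, D2;                                   // per-camera intrinsics from calibrateCamera
 | | Mat R, T, E, F;
 | | double rms = stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
 | |                              K1, D1, K2, D2, imageSize, R, T, E, F,
 | |                              CALIB_FIX_INTRINSIC);
 | | @endcode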
1851 | | */ |
1852 | | CV_EXPORTS_AS(stereoCalibrateExtended) double stereoCalibrate( InputArrayOfArrays objectPoints, |
1853 | | InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
1854 | | InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, |
1855 | | InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, |
1856 | | Size imageSize, InputOutputArray R, InputOutputArray T, OutputArray E, OutputArray F, |
1857 | | OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, OutputArray perViewErrors, int flags = CALIB_FIX_INTRINSIC, |
1858 | | TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) ); |
1859 | | |
1860 | | /// @overload |
1861 | | CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints, |
1862 | | InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
1863 | | InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, |
1864 | | InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, |
1865 | | Size imageSize, OutputArray R,OutputArray T, OutputArray E, OutputArray F, |
1866 | | int flags = CALIB_FIX_INTRINSIC, |
1867 | | TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) ); |
1868 | | |
1869 | | /// @overload |
1870 | | CV_EXPORTS_W double stereoCalibrate( InputArrayOfArrays objectPoints, |
1871 | | InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
1872 | | InputOutputArray cameraMatrix1, InputOutputArray distCoeffs1, |
1873 | | InputOutputArray cameraMatrix2, InputOutputArray distCoeffs2, |
1874 | | Size imageSize, InputOutputArray R, InputOutputArray T, OutputArray E, OutputArray F, |
1875 | | OutputArray perViewErrors, int flags = CALIB_FIX_INTRINSIC, |
1876 | | TermCriteria criteria = TermCriteria(TermCriteria::COUNT+TermCriteria::EPS, 30, 1e-6) ); |
1877 | | |
1878 | | /** @brief Computes rectification transforms for each head of a calibrated stereo camera. |
1879 | | |
1880 | | @param cameraMatrix1 First camera intrinsic matrix. |
1881 | | @param distCoeffs1 First camera distortion parameters. |
1882 | | @param cameraMatrix2 Second camera intrinsic matrix. |
1883 | | @param distCoeffs2 Second camera distortion parameters. |
1884 | | @param imageSize Size of the image used for stereo calibration. |
1885 | | @param R Rotation matrix from the coordinate system of the first camera to the second camera, |
1886 | | see @ref stereoCalibrate. |
1887 | | @param T Translation vector from the coordinate system of the first camera to the second camera, |
1888 | | see @ref stereoCalibrate. |
1889 | | @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. This matrix |
1890 | | brings points given in the unrectified first camera's coordinate system to points in the rectified |
1891 | | first camera's coordinate system. In more technical terms, it performs a change of basis from the |
1892 | | unrectified first camera's coordinate system to the rectified first camera's coordinate system. |
1893 | | @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. This matrix |
1894 | | brings points given in the unrectified second camera's coordinate system to points in the rectified |
1895 | | second camera's coordinate system. In more technical terms, it performs a change of basis from the |
1896 | | unrectified second camera's coordinate system to the rectified second camera's coordinate system. |
1897 | | @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first |
1898 | | camera, i.e. it projects points given in the rectified first camera coordinate system into the |
1899 | | rectified first camera's image. |
1900 | | @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second |
1901 | | camera, i.e. it projects points given in the rectified first camera coordinate system into the |
1902 | | rectified second camera's image. |
1903 | | @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see @ref reprojectImageTo3D). |
1904 | | @param flags Operation flags that may be zero or @ref CALIB_ZERO_DISPARITY . If the flag is set, |
1905 | | the function makes the principal points of each camera have the same pixel coordinates in the |
1906 | | rectified views. If the flag is not set, the function may still shift the images in the
1907 | | horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the |
1908 | | useful image area. |
1909 | | @param alpha Free scaling parameter. If it is -1 or absent, the function performs the default |
1910 | | scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified |
1911 | | images are zoomed and shifted so that only valid pixels are visible (no black areas after |
1912 | | rectification). alpha=1 means that the rectified image is decimated and shifted so that all the |
1913 | | pixels from the original images from the cameras are retained in the rectified images (no source |
1914 | | image pixels are lost). Any intermediate value yields an intermediate result between |
1915 | | those two extreme cases. |
1916 | | @param newImageSize New image resolution after rectification. The same size should be passed to |
1917 | | #initUndistortRectifyMap (see the stereo_calib.cpp sample in the OpenCV samples directory). When (0,0)
1918 | | is passed (default), it is set to the original imageSize . Setting it to a larger value can help you |
1919 | | preserve details in the original image, especially when there is a big radial distortion. |
1920 | | @param validPixROI1 Optional output rectangles inside the rectified images where all the pixels |
1921 | | are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller |
1922 | | (see the picture below). |
1923 | | @param validPixROI2 Optional output rectangles inside the rectified images where all the pixels |
1924 | | are valid. If alpha=0 , the ROIs cover the whole images. Otherwise, they are likely to be smaller |
1925 | | (see the picture below). |
1926 | | |
1927 | | The function computes the rotation matrices for each camera that (virtually) make both camera image |
1928 | | planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies |
1929 | | the dense stereo correspondence problem. The function takes the matrices computed by #stereoCalibrate |
1930 | | as input. As output, it provides two rotation matrices and also two projection matrices in the new |
1931 | | coordinates. The function distinguishes the following two cases: |
1932 | | |
1933 | | - **Horizontal stereo**: the first and the second camera views are shifted relative to each other |
1934 | | mainly along the x-axis (with possible small vertical shift). In the rectified images, the |
1935 | | corresponding epipolar lines in the left and right cameras are horizontal and have the same |
1936 | | y-coordinate. P1 and P2 look like: |
1937 | | |
1938 | | \f[\texttt{P1} = \begin{bmatrix} |
1939 | | f & 0 & cx_1 & 0 \\ |
1940 | | 0 & f & cy & 0 \\ |
1941 | | 0 & 0 & 1 & 0 |
1942 | | \end{bmatrix}\f] |
1943 | | |
1944 | | \f[\texttt{P2} = \begin{bmatrix} |
1945 | | f & 0 & cx_2 & T_x \cdot f \\ |
1946 | | 0 & f & cy & 0 \\ |
1947 | | 0 & 0 & 1 & 0 |
1948 | | \end{bmatrix} ,\f] |
1949 | | |
1950 | | \f[\texttt{Q} = \begin{bmatrix} |
1951 | | 1 & 0 & 0 & -cx_1 \\ |
1952 | | 0 & 1 & 0 & -cy \\ |
1953 | | 0 & 0 & 0 & f \\ |
1954 | | 0 & 0 & -\frac{1}{T_x} & \frac{cx_1 - cx_2}{T_x} |
1955 | | \end{bmatrix} \f] |
1956 | | |
1957 | | where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if |
1958 | | @ref CALIB_ZERO_DISPARITY is set. |
1959 | | |
1960 | | - **Vertical stereo**: the first and the second camera views are shifted relative to each other |
1961 | | mainly in the vertical direction (and probably a bit in the horizontal direction too). The epipolar |
1962 | | lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like: |
1963 | | |
1964 | | \f[\texttt{P1} = \begin{bmatrix} |
1965 | | f & 0 & cx & 0 \\ |
1966 | | 0 & f & cy_1 & 0 \\ |
1967 | | 0 & 0 & 1 & 0 |
1968 | | \end{bmatrix}\f] |
1969 | | |
1970 | | \f[\texttt{P2} = \begin{bmatrix} |
1971 | | f & 0 & cx & 0 \\ |
1972 | | 0 & f & cy_2 & T_y \cdot f \\ |
1973 | | 0 & 0 & 1 & 0 |
1974 | | \end{bmatrix},\f] |
1975 | | |
1976 | | \f[\texttt{Q} = \begin{bmatrix} |
1977 | | 1 & 0 & 0 & -cx \\ |
1978 | | 0 & 1 & 0 & -cy_1 \\ |
1979 | | 0 & 0 & 0 & f \\ |
1980 | | 0 & 0 & -\frac{1}{T_y} & \frac{cy_1 - cy_2}{T_y} |
1981 | | \end{bmatrix} \f] |
1982 | | |
1983 | | where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if |
1984 | | @ref CALIB_ZERO_DISPARITY is set. |
1985 | | |
1986 | | As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera |
1987 | | matrices. The matrices, together with R1 and R2 , can then be passed to #initUndistortRectifyMap to |
1988 | | initialize the rectification map for each camera. |
1989 | | |
1990 | | Below is a screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through
1991 | | the corresponding image regions. This means that the images are well rectified, which is what most
1992 | | stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 ; as you can see,
1993 | | their interiors contain only valid pixels.
1994 | | |
1995 | |  |
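 | | 
 | | A minimal calling sketch (illustrative only; K1, D1, K2, D2, R, T and imageSize are assumed to
 | | come from a previous #stereoCalibrate run):
 | | @code
 | | Mat R1, R2, P1, P2, Q;
 | | Rect roi1, roi2;   // valid-pixel rectangles described above
 | | stereoRectify(K1, D1, K2, D2, imageSize, R, T, R1, R2, P1, P2, Q,
 | |               CALIB_ZERO_DISPARITY, -1, imageSize, &roi1, &roi2);
 | | 
 | | // The outputs feed initUndistortRectifyMap for each camera; here for the first one:
 | | Mat map1x, map1y;
 | | initUndistortRectifyMap(K1, D1, R1, P1, imageSize, CV_32FC1, map1x, map1y);
 | | @endcode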
1996 | | */ |
1997 | | CV_EXPORTS_W void stereoRectify( InputArray cameraMatrix1, InputArray distCoeffs1, |
1998 | | InputArray cameraMatrix2, InputArray distCoeffs2, |
1999 | | Size imageSize, InputArray R, InputArray T, |
2000 | | OutputArray R1, OutputArray R2, |
2001 | | OutputArray P1, OutputArray P2, |
2002 | | OutputArray Q, int flags = CALIB_ZERO_DISPARITY, |
2003 | | double alpha = -1, Size newImageSize = Size(), |
2004 | | CV_OUT Rect* validPixROI1 = 0, CV_OUT Rect* validPixROI2 = 0 ); |
2005 | | |
2006 | | /** @brief Computes a rectification transform for an uncalibrated stereo camera. |
2007 | | |
2008 | | @param points1 Array of feature points in the first image. |
2009 | | @param points2 The corresponding points in the second image. The same formats as in |
2010 | | #findFundamentalMat are supported. |
2011 | | @param F Input fundamental matrix. It can be computed from the same set of point pairs using |
2012 | | #findFundamentalMat . |
2013 | | @param imgSize Size of the image. |
2014 | | @param H1 Output rectification homography matrix for the first image. |
2015 | | @param H2 Output rectification homography matrix for the second image. |
2016 | | @param threshold Optional threshold used to filter out the outliers. If the parameter is greater |
2017 | | than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points |
2018 | | for which \f$|\texttt{points2[i]}^T \cdot \texttt{F} \cdot \texttt{points1[i]}|>\texttt{threshold}\f$ ) |
2019 | | are rejected prior to computing the homographies. Otherwise, all the points are considered inliers. |
2020 | | |
2021 | | The function computes the rectification transformations without knowing intrinsic parameters of the |
2022 | | cameras and their relative position in the space, which explains the suffix "uncalibrated". Another |
2023 | | related difference from #stereoRectify is that the function outputs not the rectification |
2024 | | transformations in the object (3D) space, but the planar perspective transformations encoded by the |
2025 | | homography matrices H1 and H2 . The function implements the algorithm @cite Hartley99 . |
2026 | | |
2027 | | @note |
2028 | | While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily |
2029 | | depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion, |
2030 | | it would be better to correct it before computing the fundamental matrix and calling this |
2031 | | function. For example, distortion coefficients can be estimated for each head of the stereo camera
2032 | | separately by using #calibrateCamera . Then, the images can be corrected using #undistort , or |
2033 | | just the point coordinates can be corrected with #undistortPoints . |
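 | | 
 | | A minimal calling sketch (illustrative only; points1, points2 and imgSize are assumed to be
 | | prepared as described above):
 | | @code
 | | Mat F = findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
 | | Mat H1, H2;
 | | if( stereoRectifyUncalibrated(points1, points2, F, imgSize, H1, H2, 5) )
 | | {
 | |     // H1 and H2 can now be applied to the images, e.g. with warpPerspective.
 | | }
 | | @endcode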
2034 | | */ |
2035 | | CV_EXPORTS_W bool stereoRectifyUncalibrated( InputArray points1, InputArray points2, |
2036 | | InputArray F, Size imgSize, |
2037 | | OutputArray H1, OutputArray H2, |
2038 | | double threshold = 5 ); |
2039 | | |
2040 | | //! computes the rectification transformations for a 3-head camera, where all the heads are on the same line.
2041 | | CV_EXPORTS_W float rectify3Collinear( InputArray cameraMatrix1, InputArray distCoeffs1, |
2042 | | InputArray cameraMatrix2, InputArray distCoeffs2, |
2043 | | InputArray cameraMatrix3, InputArray distCoeffs3, |
2044 | | InputArrayOfArrays imgpt1, InputArrayOfArrays imgpt3, |
2045 | | Size imageSize, InputArray R12, InputArray T12, |
2046 | | InputArray R13, InputArray T13, |
2047 | | OutputArray R1, OutputArray R2, OutputArray R3, |
2048 | | OutputArray P1, OutputArray P2, OutputArray P3, |
2049 | | OutputArray Q, double alpha, Size newImgSize, |
2050 | | CV_OUT Rect* roi1, CV_OUT Rect* roi2, int flags ); |
2051 | | |
2052 | | /** @brief Returns the new camera intrinsic matrix based on the free scaling parameter. |
2053 | | |
2054 | | @param cameraMatrix Input camera intrinsic matrix. |
2055 | | @param distCoeffs Input vector of distortion coefficients |
2056 | | \f$\distcoeffs\f$. If the vector is NULL/empty, the zero distortion coefficients are |
2057 | | assumed. |
2058 | | @param imageSize Original image size. |
2059 | | @param alpha Free scaling parameter between 0 (when all the pixels in the undistorted image are |
2060 | | valid) and 1 (when all the source image pixels are retained in the undistorted image). See |
2061 | | #stereoRectify for details. |
2062 | | @param newImgSize Image size after rectification. By default, it is set to imageSize . |
2063 | | @param validPixROI Optional output rectangle that outlines all-good-pixels region in the |
2064 | | undistorted image. See roi1, roi2 description in #stereoRectify . |
2065 | | @param centerPrincipalPoint Optional flag that indicates whether in the new camera intrinsic matrix the |
2066 | | principal point should be at the image center or not. By default, the principal point is chosen to |
2067 | | best fit a subset of the source image (determined by alpha) to the corrected image. |
2068 | | @return Output new camera intrinsic matrix.
2069 | | |
2070 | | The function computes and returns the optimal new camera intrinsic matrix based on the free scaling parameter. |
2071 | | By varying this parameter, you may retrieve only sensible pixels (alpha=0), keep all the original
2072 | | image pixels if there is valuable information in the corners (alpha=1), or get something in between.
2073 | | When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to |
2074 | | "virtual" pixels outside of the captured distorted image. The original camera intrinsic matrix, distortion |
2075 | | coefficients, the computed new camera intrinsic matrix, and newImageSize should be passed to |
2076 | | #initUndistortRectifyMap to produce the maps for #remap . |
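 | | 
 | | A minimal sketch of that pipeline (illustrative only; K, D, imageSize and src are assumed given):
 | | @code
 | | Rect validRoi;
 | | Mat newK = getOptimalNewCameraMatrix(K, D, imageSize, 1.0, imageSize, &validRoi);
 | | Mat map1, map2, undistorted;
 | | initUndistortRectifyMap(K, D, Mat(), newK, imageSize, CV_32FC1, map1, map2);
 | | remap(src, undistorted, map1, map2, INTER_LINEAR);
 | | // With alpha=1, no source pixels are lost; validRoi outlines the all-good-pixels region.
 | | @endcode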
2077 | | */ |
2078 | | CV_EXPORTS_W Mat getOptimalNewCameraMatrix( InputArray cameraMatrix, InputArray distCoeffs, |
2079 | | Size imageSize, double alpha, Size newImgSize = Size(), |
2080 | | CV_OUT Rect* validPixROI = 0, |
2081 | | bool centerPrincipalPoint = false); |
2082 | | |
2083 | | /** @brief Computes Hand-Eye calibration: \f$_{}^{g}\textrm{T}_c\f$ |
2084 | | |
2085 | | @param[in] R_gripper2base Rotation part extracted from the homogeneous matrix that transforms a point |
2086 | | expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$). |
2087 | | This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
2088 | | for all the transformations from gripper frame to robot base frame. |
2089 | | @param[in] t_gripper2base Translation part extracted from the homogeneous matrix that transforms a point |
2090 | | expressed in the gripper frame to the robot base frame (\f$_{}^{b}\textrm{T}_g\f$). |
2091 | | This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations |
2092 | | from gripper frame to robot base frame. |
2093 | | @param[in] R_target2cam Rotation part extracted from the homogeneous matrix that transforms a point |
2094 | | expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$). |
2095 | | This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
2096 | | for all the transformations from calibration target frame to camera frame. |
2097 | | @param[in] t_target2cam Translation part extracted from the homogeneous matrix that transforms a point
2098 | | expressed in the target frame to the camera frame (\f$_{}^{c}\textrm{T}_t\f$). |
2099 | | This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations |
2100 | | from calibration target frame to camera frame. |
2101 | | @param[out] R_cam2gripper Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point |
2102 | | expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$). |
2103 | | @param[out] t_cam2gripper Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point |
2104 | | expressed in the camera frame to the gripper frame (\f$_{}^{g}\textrm{T}_c\f$). |
2105 | | @param[in] method One of the implemented Hand-Eye calibration methods, see cv::HandEyeCalibrationMethod
2106 | | |
2107 | | The function performs the Hand-Eye calibration using various methods. One approach consists in estimating the |
2108 | | rotation and then the translation (separable solutions); the following methods are implemented:
2109 | | - R. Tsai, R. Lenz A New Technique for Fully Autonomous and Efficient 3D Robotics Hand/Eye Calibration \cite Tsai89
2110 | | - F. Park, B. Martin Robot Sensor Calibration: Solving AX = XB on the Euclidean Group \cite Park94 |
2111 | | - R. Horaud, F. Dornaika Hand-Eye Calibration \cite Horaud95 |
2112 | | |
2113 | | Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions), |
2114 | | with the following implemented methods: |
2115 | | - N. Andreff, R. Horaud, B. Espiau On-line Hand-Eye Calibration \cite Andreff99 |
2116 | | - K. Daniilidis Hand-Eye Calibration Using Dual Quaternions \cite Daniilidis98 |
2117 | | |
2118 | | The following picture describes the Hand-Eye calibration problem where the transformation between a camera ("eye")
2119 | | and the robot gripper ("hand") on which it is mounted has to be estimated. This configuration is called eye-in-hand.
2120 | | 
2121 | | The eye-to-hand configuration consists of a static camera observing a calibration pattern mounted on the robot
2122 | | end-effector. The transformation from the camera to the robot base frame can then be estimated by inputting |
2123 | | the suitable transformations to the function, see below. |
2124 | | |
2125 | |  |
2126 | | |
2127 | | The calibration procedure is the following: |
2128 | | - a static calibration pattern is used to estimate the transformation between the target frame |
2129 | | and the camera frame |
2130 | | - the robot gripper is moved in order to acquire several poses |
2131 | | - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for |
2132 | | instance the robot kinematics |
2133 | | \f[ |
2134 | | \begin{bmatrix} |
2135 | | X_b\\ |
2136 | | Y_b\\ |
2137 | | Z_b\\ |
2138 | | 1 |
2139 | | \end{bmatrix} |
2140 | | = |
2141 | | \begin{bmatrix} |
2142 | | _{}^{b}\textrm{R}_g & _{}^{b}\textrm{t}_g \\ |
2143 | | 0_{1 \times 3} & 1 |
2144 | | \end{bmatrix} |
2145 | | \begin{bmatrix} |
2146 | | X_g\\ |
2147 | | Y_g\\ |
2148 | | Z_g\\ |
2149 | | 1 |
2150 | | \end{bmatrix} |
2151 | | \f] |
2152 | | - for each pose, the homogeneous transformation between the calibration target frame and the camera frame is recorded using |
2153 | | for instance a pose estimation method (PnP) from 2D-3D point correspondences |
2154 | | \f[ |
2155 | | \begin{bmatrix} |
2156 | | X_c\\ |
2157 | | Y_c\\ |
2158 | | Z_c\\ |
2159 | | 1 |
2160 | | \end{bmatrix} |
2161 | | = |
2162 | | \begin{bmatrix} |
2163 | | _{}^{c}\textrm{R}_t & _{}^{c}\textrm{t}_t \\ |
2164 | | 0_{1 \times 3} & 1 |
2165 | | \end{bmatrix} |
2166 | | \begin{bmatrix} |
2167 | | X_t\\ |
2168 | | Y_t\\ |
2169 | | Z_t\\ |
2170 | | 1 |
2171 | | \end{bmatrix} |
2172 | | \f] |
2173 | | |
2174 | | The Hand-Eye calibration procedure returns the following homogeneous transformation |
2175 | | \f[ |
2176 | | \begin{bmatrix} |
2177 | | X_g\\ |
2178 | | Y_g\\ |
2179 | | Z_g\\ |
2180 | | 1 |
2181 | | \end{bmatrix} |
2182 | | = |
2183 | | \begin{bmatrix} |
2184 | | _{}^{g}\textrm{R}_c & _{}^{g}\textrm{t}_c \\ |
2185 | | 0_{1 \times 3} & 1 |
2186 | | \end{bmatrix} |
2187 | | \begin{bmatrix} |
2188 | | X_c\\ |
2189 | | Y_c\\ |
2190 | | Z_c\\ |
2191 | | 1 |
2192 | | \end{bmatrix} |
2193 | | \f] |
2194 | | |
2195 | | This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{X}\mathbf{B}\f$ equation: |
2196 | | - for an eye-in-hand configuration |
2197 | | \f[ |
2198 | | \begin{align*} |
2199 | | ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &= |
2200 | | \hspace{0.1em} ^{b}{\textrm{T}_g}^{(2)} \hspace{0.2em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\ |
2201 | | |
2202 | | (^{b}{\textrm{T}_g}^{(2)})^{-1} \hspace{0.2em} ^{b}{\textrm{T}_g}^{(1)} \hspace{0.2em} ^{g}\textrm{T}_c &= |
2203 | | \hspace{0.1em} ^{g}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\ |
2204 | | |
2205 | | \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\ |
2206 | | \end{align*} |
2207 | | \f] |
2208 | | |
2209 | | - for an eye-to-hand configuration |
2210 | | \f[ |
2211 | | \begin{align*} |
2212 | | ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(1)} &= |
2213 | | \hspace{0.1em} ^{g}{\textrm{T}_b}^{(2)} \hspace{0.2em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} \\ |
2214 | | |
2215 | | (^{g}{\textrm{T}_b}^{(2)})^{-1} \hspace{0.2em} ^{g}{\textrm{T}_b}^{(1)} \hspace{0.2em} ^{b}\textrm{T}_c &= |
2216 | | \hspace{0.1em} ^{b}\textrm{T}_c \hspace{0.2em} ^{c}{\textrm{T}_t}^{(2)} (^{c}{\textrm{T}_t}^{(1)})^{-1} \\ |
2217 | | |
2218 | | \textrm{A}_i \textrm{X} &= \textrm{X} \textrm{B}_i \\ |
2219 | | \end{align*} |
2220 | | \f] |
2221 | | |
2222 | | \note |
2223 | | Additional information can be found on this [website](http://campar.in.tum.de/Chair/HandEyeCalibration). |
2224 | | \note |
2225 | | A minimum of 2 motions with non-parallel rotation axes is necessary to determine the hand-eye transformation.
2226 | | So at least 3 different poses are required, but it is strongly recommended to use many more poses. |
2227 | | |
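 | | A minimal eye-in-hand calling sketch (illustrative only; the four input vectors are assumed to
 | | be filled with one entry per acquired robot pose, as described above):
 | | @code
 | | vector<Mat> R_gripper2base, t_gripper2base; // from the robot kinematics
 | | vector<Mat> R_target2cam, t_target2cam;     // from e.g. solvePnP on the calibration target
 | | // ... fill the four vectors, one element per pose (at least 3 poses) ...
 | | 
 | | Mat R_cam2gripper, t_cam2gripper;
 | | calibrateHandEye(R_gripper2base, t_gripper2base, R_target2cam, t_target2cam,
 | |                  R_cam2gripper, t_cam2gripper, CALIB_HAND_EYE_TSAI);
 | | @endcode
 | | 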
2228 | | */ |
2229 | | CV_EXPORTS_W void calibrateHandEye( InputArrayOfArrays R_gripper2base, InputArrayOfArrays t_gripper2base, |
2230 | | InputArrayOfArrays R_target2cam, InputArrayOfArrays t_target2cam, |
2231 | | OutputArray R_cam2gripper, OutputArray t_cam2gripper, |
2232 | | HandEyeCalibrationMethod method=CALIB_HAND_EYE_TSAI ); |
2233 | | |
2234 | | /** @brief Computes Robot-World/Hand-Eye calibration: \f$_{}^{w}\textrm{T}_b\f$ and \f$_{}^{c}\textrm{T}_g\f$ |
2235 | | |
2236 | | @param[in] R_world2cam Rotation part extracted from the homogeneous matrix that transforms a point |
2237 | | expressed in the world frame to the camera frame (\f$_{}^{c}\textrm{T}_w\f$). |
2238 | | This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
2239 | | for all the transformations from world frame to the camera frame. |
2240 | | @param[in] t_world2cam Translation part extracted from the homogeneous matrix that transforms a point |
2241 | | expressed in the world frame to the camera frame (\f$_{}^{c}\textrm{T}_w\f$). |
2242 | | This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations |
2243 | | from world frame to the camera frame. |
2244 | | @param[in] R_base2gripper Rotation part extracted from the homogeneous matrix that transforms a point |
2245 | | expressed in the robot base frame to the gripper frame (\f$_{}^{g}\textrm{T}_b\f$). |
2246 | | This is a vector (`vector<Mat>`) that contains the rotations, as `(3x3)` rotation matrices or `(3x1)` rotation vectors,
2247 | | for all the transformations from robot base frame to the gripper frame. |
2248 | | @param[in] t_base2gripper Translation part extracted from the homogeneous matrix that transforms a point
2249 | | expressed in the robot base frame to the gripper frame (\f$_{}^{g}\textrm{T}_b\f$). |
2250 | | This is a vector (`vector<Mat>`) that contains the `(3x1)` translation vectors for all the transformations |
2251 | | from robot base frame to the gripper frame. |
2252 | | @param[out] R_base2world Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point |
2253 | | expressed in the robot base frame to the world frame (\f$_{}^{w}\textrm{T}_b\f$). |
2254 | | @param[out] t_base2world Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point |
2255 | | expressed in the robot base frame to the world frame (\f$_{}^{w}\textrm{T}_b\f$). |
2256 | | @param[out] R_gripper2cam Estimated `(3x3)` rotation part extracted from the homogeneous matrix that transforms a point |
2257 | | expressed in the gripper frame to the camera frame (\f$_{}^{c}\textrm{T}_g\f$). |
2258 | | @param[out] t_gripper2cam Estimated `(3x1)` translation part extracted from the homogeneous matrix that transforms a point |
2259 | | expressed in the gripper frame to the camera frame (\f$_{}^{c}\textrm{T}_g\f$). |
2260 | | @param[in] method One of the implemented Robot-World/Hand-Eye calibration methods, see cv::RobotWorldHandEyeCalibrationMethod
2261 | | |
2262 | | The function performs the Robot-World/Hand-Eye calibration using various methods. One approach consists in estimating the |
2263 | | rotation and then the translation (separable solutions):
2264 | | - M. Shah, Solving the robot-world/hand-eye calibration problem using the kronecker product \cite Shah2013SolvingTR |
2265 | | |
2266 | | Another approach consists in estimating simultaneously the rotation and the translation (simultaneous solutions), |
2267 | | with the following implemented method: |
2268 | | - A. Li, L. Wang, and D. Wu, Simultaneous robot-world and hand-eye calibration using dual-quaternions and kronecker product \cite Li2010SimultaneousRA |
2269 | | |
2270 | | The following picture describes the Robot-World/Hand-Eye calibration problem where the transformations between a robot and a world frame |
2271 | | and between a robot gripper ("hand") and a camera ("eye") mounted at the robot end-effector have to be estimated. |
2272 | | |
2273 | |  |
2274 | | |
2275 | | The calibration procedure is the following: |
2276 | | - a static calibration pattern is used to estimate the transformation between the target frame |
2277 | | and the camera frame |
2278 | | - the robot gripper is moved in order to acquire several poses |
2279 | | - for each pose, the homogeneous transformation between the gripper frame and the robot base frame is recorded using for |
2280 | | instance the robot kinematics |
2281 | | \f[ |
2282 | | \begin{bmatrix} |
2283 | | X_g\\ |
2284 | | Y_g\\ |
2285 | | Z_g\\ |
2286 | | 1 |
2287 | | \end{bmatrix} |
2288 | | = |
2289 | | \begin{bmatrix} |
2290 | | _{}^{g}\textrm{R}_b & _{}^{g}\textrm{t}_b \\ |
2291 | | 0_{1 \times 3} & 1 |
2292 | | \end{bmatrix} |
2293 | | \begin{bmatrix} |
2294 | | X_b\\ |
2295 | | Y_b\\ |
2296 | | Z_b\\ |
2297 | | 1 |
2298 | | \end{bmatrix} |
2299 | | \f] |
2300 | | - for each pose, the homogeneous transformation between the calibration target frame (the world frame) and the camera frame is recorded using |
2301 | | for instance a pose estimation method (PnP) from 2D-3D point correspondences |
2302 | | \f[ |
2303 | | \begin{bmatrix} |
2304 | | X_c\\ |
2305 | | Y_c\\ |
2306 | | Z_c\\ |
2307 | | 1 |
2308 | | \end{bmatrix} |
2309 | | = |
2310 | | \begin{bmatrix} |
2311 | | _{}^{c}\textrm{R}_w & _{}^{c}\textrm{t}_w \\ |
2312 | | 0_{1 \times 3} & 1 |
2313 | | \end{bmatrix} |
2314 | | \begin{bmatrix} |
2315 | | X_w\\ |
2316 | | Y_w\\ |
2317 | | Z_w\\ |
2318 | | 1 |
2319 | | \end{bmatrix} |
2320 | | \f] |
2321 | | |
2322 | | The Robot-World/Hand-Eye calibration procedure returns the following homogeneous transformations |
2323 | | \f[ |
2324 | | \begin{bmatrix} |
2325 | | X_w\\ |
2326 | | Y_w\\ |
2327 | | Z_w\\ |
2328 | | 1 |
2329 | | \end{bmatrix} |
2330 | | = |
2331 | | \begin{bmatrix} |
2332 | | _{}^{w}\textrm{R}_b & _{}^{w}\textrm{t}_b \\ |
2333 | | 0_{1 \times 3} & 1 |
2334 | | \end{bmatrix} |
2335 | | \begin{bmatrix} |
2336 | | X_b\\ |
2337 | | Y_b\\ |
2338 | | Z_b\\ |
2339 | | 1 |
2340 | | \end{bmatrix} |
2341 | | \f] |
2342 | | \f[ |
2343 | | \begin{bmatrix} |
2344 | | X_c\\ |
2345 | | Y_c\\ |
2346 | | Z_c\\ |
2347 | | 1 |
2348 | | \end{bmatrix} |
2349 | | = |
2350 | | \begin{bmatrix} |
2351 | | _{}^{c}\textrm{R}_g & _{}^{c}\textrm{t}_g \\ |
2352 | | 0_{1 \times 3} & 1 |
2353 | | \end{bmatrix} |
2354 | | \begin{bmatrix} |
2355 | | X_g\\ |
2356 | | Y_g\\ |
2357 | | Z_g\\ |
2358 | | 1 |
2359 | | \end{bmatrix} |
2360 | | \f] |
2361 | | |
2362 | | This problem is also known as solving the \f$\mathbf{A}\mathbf{X}=\mathbf{Z}\mathbf{B}\f$ equation, with: |
2363 | | - \f$\mathbf{A} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_w\f$ |
2364 | | - \f$\mathbf{X} \Leftrightarrow \hspace{0.1em} _{}^{w}\textrm{T}_b\f$ |
2365 | | - \f$\mathbf{Z} \Leftrightarrow \hspace{0.1em} _{}^{c}\textrm{T}_g\f$ |
2366 | | - \f$\mathbf{B} \Leftrightarrow \hspace{0.1em} _{}^{g}\textrm{T}_b\f$ |
2367 | | |
2368 | | \note |
2369 | | At least 3 measurements are required (the size of the input vectors must be greater than or equal to 3).
2370 | | |
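 | | A minimal calling sketch (illustrative only; the four input vectors are assumed to be filled
 | | with one entry per acquired robot pose, as described above):
 | | @code
 | | vector<Mat> R_world2cam, t_world2cam;       // from e.g. solvePnP on the calibration target
 | | vector<Mat> R_base2gripper, t_base2gripper; // from the robot kinematics
 | | // ... fill the four vectors, one element per pose (at least 3 poses) ...
 | | 
 | | Mat R_base2world, t_base2world, R_gripper2cam, t_gripper2cam;
 | | calibrateRobotWorldHandEye(R_world2cam, t_world2cam, R_base2gripper, t_base2gripper,
 | |                            R_base2world, t_base2world, R_gripper2cam, t_gripper2cam);
 | | @endcode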
2371 | | */ |
2372 | | CV_EXPORTS_W void calibrateRobotWorldHandEye( InputArrayOfArrays R_world2cam, InputArrayOfArrays t_world2cam, |
2373 | | InputArrayOfArrays R_base2gripper, InputArrayOfArrays t_base2gripper, |
2374 | | OutputArray R_base2world, OutputArray t_base2world, |
2375 | | OutputArray R_gripper2cam, OutputArray t_gripper2cam, |
2376 | | RobotWorldHandEyeCalibrationMethod method=CALIB_ROBOT_WORLD_HAND_EYE_SHAH ); |
2377 | | |
2378 | | /** @brief Converts points from Euclidean to homogeneous space. |
2379 | | |
2380 | | @param src Input vector of N-dimensional points. |
2381 | | @param dst Output vector of N+1-dimensional points. |
2382 | | |
2383 | | The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of |
2384 | | point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1). |
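 | | 
 | | For example (illustrative):
 | | @code
 | | vector<Point2f> src = { Point2f(1.f, 2.f), Point2f(3.f, 4.f) };
 | | Mat dst;
 | | convertPointsToHomogeneous(src, dst); // (1,2) -> (1,2,1), (3,4) -> (3,4,1)
 | | @endcode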
2385 | | */ |
2386 | | CV_EXPORTS_W void convertPointsToHomogeneous( InputArray src, OutputArray dst ); |
2387 | | |
2388 | | /** @brief Converts points from homogeneous to Euclidean space. |
2389 | | |
2390 | | @param src Input vector of N-dimensional points. |
2391 | | @param dst Output vector of N-1-dimensional points. |
2392 | | |
2393 | | The function converts points from homogeneous to Euclidean space using perspective projection. That is,
2394 | | each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the |
2395 | | output point coordinates will be (0,0,0,...). |
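 | | 
 | | For example (illustrative):
 | | @code
 | | vector<Point3f> src = { Point3f(2.f, 4.f, 2.f) };
 | | Mat dst;
 | | convertPointsFromHomogeneous(src, dst); // (2,4,2) -> (1,2)
 | | @endcode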
2396 | | */ |
2397 | | CV_EXPORTS_W void convertPointsFromHomogeneous( InputArray src, OutputArray dst ); |
2398 | | |
2399 | | /** @brief Converts points to/from homogeneous coordinates. |
2400 | | |
2401 | | @param src Input array or vector of 2D, 3D, or 4D points. |
2402 | | @param dst Output vector of 2D, 3D, or 4D points. |
2403 | | |
2404 | | The function converts 2D or 3D points from/to homogeneous coordinates by calling either |
2405 | | #convertPointsToHomogeneous or #convertPointsFromHomogeneous. |
2406 | | |
2407 | | @note The function is obsolete. Use one of the previous two functions instead. |
2408 | | */ |
2409 | | CV_EXPORTS void convertPointsHomogeneous( InputArray src, OutputArray dst ); |
2410 | | |
2411 | | /** @brief Calculates a fundamental matrix from the corresponding points in two images. |
2412 | | |
2413 | | @param points1 Array of N points from the first image. The point coordinates should be |
2414 | | floating-point (single or double precision). |
2415 | | @param points2 Array of the second image points of the same size and format as points1 . |
2416 | | @param method Method for computing a fundamental matrix. |
2417 | | - @ref FM_7POINT for a 7-point algorithm. \f$N = 7\f$ |
2418 | | - @ref FM_8POINT for an 8-point algorithm. \f$N \ge 8\f$ |
2419 | | - @ref FM_RANSAC for the RANSAC algorithm. \f$N \ge 8\f$ |
2420 | | - @ref FM_LMEDS for the LMedS algorithm. \f$N \ge 8\f$ |
2421 | | @param ransacReprojThreshold Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar |
2422 | | line in pixels, beyond which the point is considered an outlier and is not used for computing the |
2423 | | final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the |
2424 | | point localization, image resolution, and the image noise. |
2425 | | @param confidence Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level |
2426 | | of confidence (probability) that the estimated matrix is correct. |
2427 | | @param[out] mask Optional output mask.
2428 | | @param maxIters The maximum number of robust method iterations. |
2429 | | |
2430 | | The epipolar geometry is described by the following equation: |
2431 | | |
2432 | | \f[[p_2; 1]^T F [p_1; 1] = 0\f] |
2433 | | |
2434 | | where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the |
2435 | | second images, respectively. |
2436 | | |
2437 | | The function calculates the fundamental matrix using one of four methods listed above and returns |
2438 | | the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point |
2439 | | algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3 |
2440 | | matrices sequentially). |
2441 | | |
2442 | | The calculated fundamental matrix may be passed further to #computeCorrespondEpilines that finds the |
2443 | | epipolar lines corresponding to the specified points. It can also be passed to |
2444 | | #stereoRectifyUncalibrated to compute the rectification transformation:
2445 | | @code |
2446 | | // Example. Estimation of fundamental matrix using the RANSAC algorithm |
2447 | | int point_count = 100; |
2448 | | vector<Point2f> points1(point_count); |
2449 | | vector<Point2f> points2(point_count); |
2450 | | |
2451 | | // initialize the points here ... |
2452 | | for( int i = 0; i < point_count; i++ ) |
2453 | | { |
2454 | | points1[i] = ...; |
2455 | | points2[i] = ...; |
2456 | | } |
2457 | | |
2458 | | Mat fundamental_matrix = |
2459 | | findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99); |
2460 | | @endcode |
2461 | | */ |
2462 | | CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, |
2463 | | int method, double ransacReprojThreshold, double confidence, |
2464 | | int maxIters, OutputArray mask = noArray() ); |
2465 | | |
2466 | | /** @overload */ |
2467 | | CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, |
2468 | | int method = FM_RANSAC, |
2469 | | double ransacReprojThreshold = 3., double confidence = 0.99, |
2470 | | OutputArray mask = noArray() ); |
2471 | | |
2472 | | /** @overload */ |
2473 | | CV_EXPORTS Mat findFundamentalMat( InputArray points1, InputArray points2, |
2474 | | OutputArray mask, int method = FM_RANSAC, |
2475 | | double ransacReprojThreshold = 3., double confidence = 0.99 ); |
2476 | | |
2477 | | |
2478 | | CV_EXPORTS_W Mat findFundamentalMat( InputArray points1, InputArray points2, |
2479 | | OutputArray mask, const UsacParams ¶ms); |
2480 | | |
2481 | | /** @brief Calculates an essential matrix from the corresponding points in two images. |
2482 | | |
2483 | | @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should |
2484 | | be floating-point (single or double precision). |
2485 | | @param points2 Array of the second image points of the same size and format as points1 . |
2486 | | @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
2487 | | Note that this function assumes that points1 and points2 are feature points from cameras with the |
2488 | | same camera intrinsic matrix. If this assumption does not hold for your use case, use |
2489 | | #undistortPoints with `P = cv::noArray()` for both cameras to transform image points
2490 | | to normalized image coordinates, which are valid for the identity camera intrinsic matrix. When |
2491 | | passing these coordinates, pass the identity matrix for this parameter. |
2492 | | @param method Method for computing an essential matrix. |
2493 | | - @ref RANSAC for the RANSAC algorithm. |
2494 | | - @ref LMEDS for the LMedS algorithm. |
2495 | | @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of |
2496 | | confidence (probability) that the estimated matrix is correct. |
2497 | | @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar |
2498 | | line in pixels, beyond which the point is considered an outlier and is not used for computing the |
2499 | | final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the |
2500 | | point localization, image resolution, and the image noise. |
2501 | | @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 |
2502 | | for the other points. The array is computed only in the RANSAC and LMedS methods. |
2503 | | @param maxIters The maximum number of robust method iterations. |
2504 | | |
2505 | | This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
2506 | | @cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
2507 | | |
2508 | | \f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f] |
2509 | | |
2510 | | where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the |
2511 | | second images, respectively. The result of this function may be passed further to |
2512 | | #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. |
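 | | 
 | | A minimal calling sketch (illustrative only; points1, points2 and the intrinsic matrix K are
 | | assumed given):
 | | @code
 | | Mat mask;
 | | Mat E = findEssentialMat(points1, points2, K, RANSAC, 0.999, 1.0, 1000, mask);
 | | // E and mask can then be passed to recoverPose, see below.
 | | @endcode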
2513 | | */ |
2514 | | CV_EXPORTS_W |
2515 | | Mat findEssentialMat( |
2516 | | InputArray points1, InputArray points2, |
2517 | | InputArray cameraMatrix, int method = RANSAC, |
2518 | | double prob = 0.999, double threshold = 1.0, |
2519 | | int maxIters = 1000, OutputArray mask = noArray() |
2520 | | ); |
2521 | | |
2522 | | /** @overload */ |
2523 | | CV_EXPORTS |
2524 | | Mat findEssentialMat( |
2525 | | InputArray points1, InputArray points2, |
2526 | | InputArray cameraMatrix, int method, |
2527 | | double prob, double threshold, |
2528 | | OutputArray mask |
2529 | | ); // TODO remove from OpenCV 5.0 |
2530 | | |
2531 | | /** @overload |
2532 | | @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should |
2533 | | be floating-point (single or double precision). |
2534 | | @param points2 Array of the second image points of the same size and format as points1 . |
2535 | | @param focal focal length of the camera. Note that this function assumes that points1 and points2 |
2536 | | are feature points from cameras with the same focal length and principal point.
2537 | | @param pp principal point of the camera. |
2538 | | @param method Method for computing an essential matrix.
2539 | | - @ref RANSAC for the RANSAC algorithm. |
2540 | | - @ref LMEDS for the LMedS algorithm. |
2541 | | @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar |
2542 | | line in pixels, beyond which the point is considered an outlier and is not used for computing the |
2543 | | final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the |
2544 | | point localization, image resolution, and the image noise. |
2545 | | @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of |
2546 | | confidence (probability) that the estimated matrix is correct. |
2547 | | @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 |
2548 | | for the other points. The array is computed only in the RANSAC and LMedS methods. |
2549 | | @param maxIters The maximum number of robust method iterations. |
2550 | | |
2551 | | This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
2552 | | principal point: |
2553 | | |
2554 | | \f[A = |
2555 | | \begin{bmatrix} |
2556 | | f & 0 & x_{pp} \\ |
2557 | | 0 & f & y_{pp} \\ |
2558 | | 0 & 0 & 1 |
2559 | | \end{bmatrix}\f] |
2560 | | */ |
2561 | | CV_EXPORTS_W |
2562 | | Mat findEssentialMat( |
2563 | | InputArray points1, InputArray points2, |
2564 | | double focal = 1.0, Point2d pp = Point2d(0, 0), |
2565 | | int method = RANSAC, double prob = 0.999, |
2566 | | double threshold = 1.0, int maxIters = 1000, |
2567 | | OutputArray mask = noArray() |
2568 | | ); |
2569 | | |
2570 | | /** @overload */ |
2571 | | CV_EXPORTS |
2572 | | Mat findEssentialMat( |
2573 | | InputArray points1, InputArray points2, |
2574 | | double focal, Point2d pp, |
2575 | | int method, double prob, |
2576 | | double threshold, OutputArray mask |
2577 | | ); // TODO remove from OpenCV 5.0 |
2578 | | |
2579 | | /** @brief Calculates an essential matrix from the corresponding points in two images from potentially two different cameras. |
2580 | | |
2581 | | @param points1 Array of N (N \>= 5) 2D points from the first image. The point coordinates should |
2582 | | be floating-point (single or double precision). |
2583 | | @param points2 Array of the second image points of the same size and format as points1 . |
2584 | | @param cameraMatrix1 Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
2585 | | Note that this function assumes that points1 and points2 are feature points from cameras with the |
2586 | | same camera matrix. If this assumption does not hold for your use case, use |
2587 | | #undistortPoints with `P = cv::noArray()` for both cameras to transform image points
2588 | | to normalized image coordinates, which are valid for the identity camera matrix. When |
2589 | | passing these coordinates, pass the identity matrix for this parameter. |
2590 | | @param cameraMatrix2 Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
2591 | | Note that this function assumes that points1 and points2 are feature points from cameras with the |
2592 | | same camera matrix. If this assumption does not hold for your use case, use |
2593 | | #undistortPoints with `P = cv::noArray()` for both cameras to transform image points
2594 | | to normalized image coordinates, which are valid for the identity camera matrix. When |
2595 | | passing these coordinates, pass the identity matrix for this parameter. |
2596 | | @param distCoeffs1 Input vector of distortion coefficients |
2597 | | \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
2598 | | of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. |
2599 | | @param distCoeffs2 Input vector of distortion coefficients |
2600 | | \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
2601 | | of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. |
2602 | | @param method Method for computing an essential matrix. |
2603 | | - @ref RANSAC for the RANSAC algorithm. |
2604 | | - @ref LMEDS for the LMedS algorithm. |
2605 | | @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of |
2606 | | confidence (probability) that the estimated matrix is correct. |
2607 | | @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar |
2608 | | line in pixels, beyond which the point is considered an outlier and is not used for computing the |
2609 | | final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the |
2610 | | point localization, image resolution, and the image noise. |
2611 | | @param mask Output array of N elements, every element of which is set to 0 for outliers and to 1 |
2612 | | for the other points. The array is computed only in the RANSAC and LMedS methods. |
2613 | | |
2614 | | This function estimates the essential matrix based on the five-point algorithm solver in @cite Nister03 .
2615 | | @cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
2616 | | |
2617 | | \f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f] |
2618 | | |
2619 | | where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the |
2620 | | second images, respectively. The result of this function may be passed further to |
2621 | | #decomposeEssentialMat or #recoverPose to recover the relative pose between cameras. |
2622 | | */ |
2623 | | CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2, |
2624 | | InputArray cameraMatrix1, InputArray distCoeffs1, |
2625 | | InputArray cameraMatrix2, InputArray distCoeffs2, |
2626 | | int method = RANSAC, |
2627 | | double prob = 0.999, double threshold = 1.0, |
2628 | | OutputArray mask = noArray() ); |
2629 | | |
2630 | | |
2631 | | CV_EXPORTS_W Mat findEssentialMat( InputArray points1, InputArray points2, |
2632 | | InputArray cameraMatrix1, InputArray cameraMatrix2, |
2633 | | InputArray dist_coeff1, InputArray dist_coeff2, OutputArray mask, |
2634 | | const UsacParams ¶ms); |
2635 | | |
2636 | | /** @brief Decompose an essential matrix to possible rotations and translation. |
2637 | | |
2638 | | @param E The input essential matrix. |
2639 | | @param R1 One possible rotation matrix. |
2640 | | @param R2 Another possible rotation matrix. |
2641 | | @param t One possible translation. |
2642 | | |
2643 | | This function decomposes the essential matrix E using singular value decomposition (SVD) @cite HartleyZ00. In
2644 | | general, four possible poses exist for the decomposition of E. They are \f$[R_1, t]\f$, |
2645 | | \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$. |
2646 | | |
2647 | | If E gives the epipolar constraint \f$[p_2; 1]^T A^{-T} E A^{-1} [p_1; 1] = 0\f$ between the image |
2648 | | points \f$p_1\f$ in the first image and \f$p_2\f$ in the second image, then any of the tuples
2649 | | \f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$ is a change of basis from the first |
2650 | | camera's coordinate system to the second camera's coordinate system. However, by decomposing E, one |
2651 | | can only get the direction of the translation. For this reason, the translation t is returned with |
2652 | | unit length. |
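 | | 
 | | A minimal calling sketch (illustrative; E is assumed to come from #findEssentialMat):
 | | @code
 | | Mat R1, R2, t;
 | | decomposeEssentialMat(E, R1, R2, t);
 | | // Candidate poses: [R1, t], [R1, -t], [R2, t], [R2, -t].
 | | // recoverPose selects the one that passes the chirality check.
 | | @endcode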
2653 | | */ |
2654 | | CV_EXPORTS_W void decomposeEssentialMat( InputArray E, OutputArray R1, OutputArray R2, OutputArray t ); |
2655 | | |
2656 | | /** @brief Recovers the relative camera rotation and the translation from corresponding points in two images from two different cameras, using chirality check. Returns the number of
2657 | | inliers that pass the check. |
2658 | | |
2659 | | @param points1 Array of N 2D points from the first image. The point coordinates should be |
2660 | | floating-point (single or double precision). |
2661 | | @param points2 Array of the second image points of the same size and format as points1 . |
2662 | | @param cameraMatrix1 Input camera matrix for the first camera, the same as in
2663 | | @ref calibrateCamera.
2664 | | @param distCoeffs1 Input vector of distortion coefficients for the first camera, the same as in
2665 | | @ref calibrateCamera.
2666 | | @param cameraMatrix2 Input camera matrix for the second camera, the same as in
2667 | | @ref calibrateCamera.
2668 | | @param distCoeffs2 Input vector of distortion coefficients for the second camera, the same as in
2669 | | @ref calibrateCamera.
2670 | | @param E The output essential matrix. |
2671 | | @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple |
2672 | | that performs a change of basis from the first camera's coordinate system to the second camera's |
2673 | | coordinate system. Note that, in general, t can not be used for this tuple, see the parameter |
2674 | | described below. |
2675 | | @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and |
2676 | | therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit |
2677 | | length. |
2678 | | @param method Method for computing an essential matrix. |
2679 | | - @ref RANSAC for the RANSAC algorithm. |
2680 | | - @ref LMEDS for the LMedS algorithm. |
2681 | | @param prob Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of |
2682 | | confidence (probability) that the estimated matrix is correct. |
2683 | | @param threshold Parameter used for RANSAC. It is the maximum distance from a point to an epipolar |
2684 | | line in pixels, beyond which the point is considered an outlier and is not used for computing the |
2685 | | final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the |
2686 | | point localization, image resolution, and the image noise. |
2687 | | @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks |
2688 | | inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to
2689 | | recover pose. In the output mask, only the inliers that pass the chirality check are retained.
2690 | | |
2691 | | This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies |
2692 | | possible pose hypotheses by doing the chirality check. The chirality check means that the
2693 | | triangulated 3D points should have positive depth. Some details can be found in @cite Nister03. |
2694 | | |
2695 | | This function can be used to process the output E and mask from @ref findEssentialMat. In this |
2696 | | scenario, points1 and points2 are the same input as for #findEssentialMat :
2697 | | @code |
2698 | | // Example. Estimation of the essential matrix and relative pose using the RANSAC algorithm
2699 | | int point_count = 100; |
2700 | | vector<Point2f> points1(point_count); |
2701 | | vector<Point2f> points2(point_count); |
2702 | | |
2703 | | // initialize the points here ... |
2704 | | for( int i = 0; i < point_count; i++ ) |
2705 | | { |
2706 | | points1[i] = ...; |
2707 | | points2[i] = ...; |
2708 | | } |
2709 | | |
2710 | | // Input: camera calibration of both cameras, for example using intrinsic chessboard calibration. |
2711 | | Mat cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2; |
2712 | | |
2713 | | // Output: Essential matrix, relative rotation and relative translation. |
2714 | | Mat E, R, t, mask; |
2715 | | |
2716 | | recoverPose(points1, points2, cameraMatrix1, distCoeffs1, cameraMatrix2, distCoeffs2, E, R, t, mask); |
2717 | | @endcode |
2718 | | */ |
2719 | | CV_EXPORTS_W int recoverPose( InputArray points1, InputArray points2, |
2720 | | InputArray cameraMatrix1, InputArray distCoeffs1, |
2721 | | InputArray cameraMatrix2, InputArray distCoeffs2, |
2722 | | OutputArray E, OutputArray R, OutputArray t, |
2723 | | int method = cv::RANSAC, double prob = 0.999, double threshold = 1.0, |
2724 | | InputOutputArray mask = noArray()); |
2725 | | |
2726 | | /** @brief Recovers the relative camera rotation and the translation from an estimated essential |
2727 | | matrix and the corresponding points in two images, using chirality check. Returns the number of |
2728 | | inliers that pass the check. |
2729 | | |
2730 | | @param E The input essential matrix. |
2731 | | @param points1 Array of N 2D points from the first image. The point coordinates should be |
2732 | | floating-point (single or double precision). |
2733 | | @param points2 Array of the second image points of the same size and format as points1 . |
2734 | | @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
2735 | | Note that this function assumes that points1 and points2 are feature points from cameras with the |
2736 | | same camera intrinsic matrix. |
2737 | | @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple |
2738 | | that performs a change of basis from the first camera's coordinate system to the second camera's |
2739 | | coordinate system. Note that, in general, t can not be used for this tuple, see the parameter |
2740 | | described below. |
2741 | | @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and |
2742 | | therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit |
2743 | | length. |
2744 | | @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks |
2745 | | inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to |
2746 | | recover pose. In the output mask, only the inliers that pass the chirality check are retained.
2747 | | |
2748 | | This function decomposes an essential matrix using @ref decomposeEssentialMat and then verifies |
2749 | | possible pose hypotheses by doing the chirality check. The chirality check means that the
2750 | | triangulated 3D points should have positive depth. Some details can be found in @cite Nister03. |
2751 | | |
2752 | | This function can be used to process the output E and mask from @ref findEssentialMat. In this |
2753 | | scenario, points1 and points2 are the same input as for #findEssentialMat :
2754 | | @code |
2755 | | // Example. Estimation of the essential matrix using the RANSAC algorithm
2756 | | int point_count = 100; |
2757 | | vector<Point2f> points1(point_count); |
2758 | | vector<Point2f> points2(point_count); |
2759 | | |
2760 | | // initialize the points here ... |
2761 | | for( int i = 0; i < point_count; i++ ) |
2762 | | { |
2763 | | points1[i] = ...; |
2764 | | points2[i] = ...; |
2765 | | } |
2766 | | |
2767 | | // camera matrix with both focal lengths = 1, and principal point = (0, 0)
2768 | | Mat cameraMatrix = Mat::eye(3, 3, CV_64F); |
2769 | | |
2770 | | Mat E, R, t, mask; |
2771 | | |
2772 | | E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask); |
2773 | | recoverPose(E, points1, points2, cameraMatrix, R, t, mask); |
2774 | | @endcode |
2775 | | */ |
2776 | | CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2, |
2777 | | InputArray cameraMatrix, OutputArray R, OutputArray t, |
2778 | | InputOutputArray mask = noArray() ); |
2779 | | |
2780 | | /** @overload |
2781 | | @param E The input essential matrix. |
2782 | | @param points1 Array of N 2D points from the first image. The point coordinates should be |
2783 | | floating-point (single or double precision). |
2784 | | @param points2 Array of the second image points of the same size and format as points1 . |
2785 | | @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple |
2786 | | that performs a change of basis from the first camera's coordinate system to the second camera's |
2787 | | coordinate system. Note that, in general, t cannot be used for this tuple; see the parameter
2788 | | description below. |
2789 | | @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and |
2790 | | therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit |
2791 | | length. |
2792 | | @param focal Focal length of the camera. Note that this function assumes that points1 and points2 |
2793 | | are feature points from cameras with same focal length and principal point. |
2794 | | @param pp principal point of the camera. |
2795 | | @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks |
2796 | | inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to |
2797 | | recover pose. In the output mask, only the inliers which pass the chirality check are marked.
2798 | | |
2799 | | This function differs from the one above in that it computes the camera intrinsic matrix from focal length and
2800 | | principal point: |
2801 | | |
2802 | | \f[A = |
2803 | | \begin{bmatrix} |
2804 | | f & 0 & x_{pp} \\ |
2805 | | 0 & f & y_{pp} \\ |
2806 | | 0 & 0 & 1 |
2807 | | \end{bmatrix}\f] |
2808 | | */ |
2809 | | CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2, |
2810 | | OutputArray R, OutputArray t, |
2811 | | double focal = 1.0, Point2d pp = Point2d(0, 0), |
2812 | | InputOutputArray mask = noArray() ); |
2813 | | |
2814 | | /** @overload |
2815 | | @param E The input essential matrix. |
2816 | | @param points1 Array of N 2D points from the first image. The point coordinates should be |
2817 | | floating-point (single or double precision). |
2818 | | @param points2 Array of the second image points of the same size and format as points1. |
2819 | | @param cameraMatrix Camera intrinsic matrix \f$\cameramatrix{A}\f$ . |
2820 | | Note that this function assumes that points1 and points2 are feature points from cameras with the |
2821 | | same camera intrinsic matrix. |
2822 | | @param R Output rotation matrix. Together with the translation vector, this matrix makes up a tuple |
2823 | | that performs a change of basis from the first camera's coordinate system to the second camera's |
2824 | | coordinate system. Note that, in general, t cannot be used for this tuple; see the parameter
2825 | | description below. |
2826 | | @param t Output translation vector. This vector is obtained by @ref decomposeEssentialMat and |
2827 | | therefore is only known up to scale, i.e. t is the direction of the translation vector and has unit |
2828 | | length. |
2829 | | @param distanceThresh threshold distance which is used to filter out far away points (i.e. infinite |
2830 | | points). |
2831 | | @param mask Input/output mask for inliers in points1 and points2. If it is not empty, then it marks |
2832 | | inliers in points1 and points2 for the given essential matrix E. Only these inliers will be used to |
2833 | | recover pose. In the output mask, only the inliers which pass the chirality check are marked.
2834 | | @param triangulatedPoints 3D points which were reconstructed by triangulation. |
2835 | | |
2836 | | This function differs from the one above in that it outputs the triangulated 3D points that are
2837 | | used for the chirality check.
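     | |
     | | A minimal usage sketch (assuming E, points1, points2, cameraMatrix and mask come from a prior
     | | #findEssentialMat call as in the example above; the variable names are illustrative only):
     | | @code
     | | Mat R, t, triangulatedPoints;
     | | int inliers = recoverPose(E, points1, points2, cameraMatrix, R, t,
     | |                           50.0, mask, triangulatedPoints);
     | | // triangulatedPoints is 4xN homogeneous; convert to Euclidean 3D points.
     | | Mat points3D;
     | | convertPointsFromHomogeneous(triangulatedPoints.t(), points3D);
     | | @endcode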
2838 | | */ |
2839 | | CV_EXPORTS_W int recoverPose( InputArray E, InputArray points1, InputArray points2, |
2840 | | InputArray cameraMatrix, OutputArray R, OutputArray t, double distanceThresh, InputOutputArray mask = noArray(), |
2841 | | OutputArray triangulatedPoints = noArray()); |
2842 | | |
2843 | | /** @brief For points in an image of a stereo pair, computes the corresponding epilines in the other image. |
2844 | | |
2845 | | @param points Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or |
2846 | | vector\<Point2f\> . |
2847 | | @param whichImage Index of the image (1 or 2) that contains the points.
2848 | | @param F Fundamental matrix that can be estimated using #findFundamentalMat or #stereoRectify . |
2849 | | @param lines Output vector of the epipolar lines corresponding to the points in the other image. |
2850 | | Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ . |
2851 | | |
2852 | | For every point in one of the two images of a stereo pair, the function finds the equation of the |
2853 | | corresponding epipolar line in the other image. |
2854 | | |
2855 | | From the fundamental matrix definition (see #findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second |
2856 | | image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as: |
2857 | | |
2858 | | \f[l^{(2)}_i = F p^{(1)}_i\f] |
2859 | | |
2860 | | And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as: |
2861 | | |
2862 | | \f[l^{(1)}_i = F^T p^{(2)}_i\f] |
2863 | | |
2864 | | Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ . |
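     | |
     | | A minimal sketch (assuming matched vectors points1, points2 and a fundamental matrix F already
     | | exist) that computes the epilines in the second image and evaluates the point-line residual,
     | | which is a signed distance thanks to the \f$a_i^2+b_i^2=1\f$ normalization:
     | | @code
     | | std::vector<cv::Vec3f> lines2;
     | | computeCorrespondEpilines(points1, 1, F, lines2); // epilines in the second image
     | | double maxErr = 0;
     | | for (size_t i = 0; i < lines2.size(); i++)
     | |     maxErr = std::max<double>(maxErr, std::abs(lines2[i][0]*points2[i].x +
     | |                                                lines2[i][1]*points2[i].y + lines2[i][2]));
     | | @endcode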
2865 | | */ |
2866 | | CV_EXPORTS_W void computeCorrespondEpilines( InputArray points, int whichImage, |
2867 | | InputArray F, OutputArray lines ); |
2868 | | |
2869 | | /** @brief This function reconstructs 3-dimensional points (in homogeneous coordinates) by using |
2870 | | their observations with a stereo camera. |
2871 | | |
2872 | | @param projMatr1 3x4 projection matrix of the first camera, i.e. this matrix projects 3D points |
2873 | | given in the world's coordinate system into the first image. |
2874 | | @param projMatr2 3x4 projection matrix of the second camera, i.e. this matrix projects 3D points |
2875 | | given in the world's coordinate system into the second image. |
2876 | | @param projPoints1 2xN array of feature points in the first image. In the case of the C++ version,
2877 | | it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
2878 | | @param projPoints2 2xN array of corresponding points in the second image. In the case of the C++
2879 | | version, it can also be a vector of feature points or a two-channel matrix of size 1xN or Nx1.
2880 | | @param points4D 4xN array of reconstructed points in homogeneous coordinates. These points are |
2881 | | returned in the world's coordinate system. |
2882 | | |
2883 | | @note |
2884 | | Keep in mind that all input data should be of float type in order for this function to work. |
2885 | | |
2886 | | @note |
2887 | | If the projection matrices from @ref stereoRectify are used, then the returned points are |
2888 | | represented in the first camera's rectified coordinate system. |
2889 | | |
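     | |
     | | A minimal sketch, assuming float-typed intrinsics K1, K2 and a known relative pose (R, t) of the
     | | second camera, all in CV_32F (names are illustrative):
     | | @code
     | | // Build the 3x4 projection matrices P = K [R|t], with the first camera at the origin.
     | | Mat Rt1 = Mat::eye(3, 4, CV_32F);          // [I|0]
     | | Mat Rt2(3, 4, CV_32F);
     | | R.copyTo(Rt2.colRange(0, 3));              // R: 3x3 CV_32F
     | | t.copyTo(Rt2.col(3));                      // t: 3x1 CV_32F
     | | Mat P1 = K1 * Rt1, P2 = K2 * Rt2;
     | | Mat points4D, points3D;
     | | triangulatePoints(P1, P2, points1, points2, points4D); // 4xN homogeneous
     | | convertPointsFromHomogeneous(points4D.t(), points3D);  // Nx3 Euclidean
     | | @endcode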
2890 | | @sa |
2891 | | reprojectImageTo3D |
2892 | | */ |
2893 | | CV_EXPORTS_W void triangulatePoints( InputArray projMatr1, InputArray projMatr2, |
2894 | | InputArray projPoints1, InputArray projPoints2, |
2895 | | OutputArray points4D ); |
2896 | | |
2897 | | /** @brief Refines coordinates of corresponding points. |
2898 | | |
2899 | | @param F 3x3 fundamental matrix. |
2900 | | @param points1 1xN array containing the first set of points. |
2901 | | @param points2 1xN array containing the second set of points. |
2902 | | @param newPoints1 The optimized points1. |
2903 | | @param newPoints2 The optimized points2. |
2904 | | |
2905 | | The function implements the Optimal Triangulation Method (see Multiple View Geometry @cite HartleyZ00 for details). |
2906 | | For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it |
2907 | | computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric |
2908 | | error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the |
2909 | | geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint |
2910 | | \f$newPoints2^T \cdot F \cdot newPoints1 = 0\f$ . |
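     | |
     | | A minimal sketch (assuming F and two matched vector\<Point2f\> sets exist); the input is reshaped
     | | to the expected 1xN two-channel layout and converted to double precision to be safe:
     | | @code
     | | Mat p1 = Mat(points1).reshape(2, 1), p2 = Mat(points2).reshape(2, 1);
     | | p1.convertTo(p1, CV_64FC2);
     | | p2.convertTo(p2, CV_64FC2);
     | | Mat newP1, newP2;
     | | correctMatches(F, p1, p2, newP1, newP2);
     | | // newP1/newP2 satisfy the epipolar constraint and can be triangulated directly.
     | | @endcode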
2911 | | */ |
2912 | | CV_EXPORTS_W void correctMatches( InputArray F, InputArray points1, InputArray points2, |
2913 | | OutputArray newPoints1, OutputArray newPoints2 ); |
2914 | | |
2915 | | /** @brief Filters off small noise blobs (speckles) in the disparity map |
2916 | | |
2917 | | @param img The input 16-bit signed disparity image |
2918 | | @param newVal The disparity value used to paint-off the speckles |
2919 | | @param maxSpeckleSize The maximum blob size to consider it a speckle. Larger blobs are not
2920 | | affected by the algorithm |
2921 | | @param maxDiff Maximum difference between neighbor disparity pixels to put them into the same |
2922 | | blob. Note that since StereoBM, StereoSGBM and possibly other algorithms return a fixed-point
2923 | | disparity map, where disparity values are multiplied by 16, this scale factor should be taken into |
2924 | | account when specifying this parameter value. |
2925 | | @param buf The optional temporary buffer to avoid memory allocation within the function. |
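     | |
     | | A minimal sketch for a 16-bit fixed-point disparity map, as produced by StereoBM or StereoSGBM
     | | (the threshold values are illustrative):
     | | @code
     | | // disp is CV_16S with disparities scaled by 16 (4 fractional bits).
     | | int maxSpeckleSize = 100;    // blobs of up to 100 pixels are treated as speckles
     | | double maxDiff = 1.0 * 16;   // 1 disparity step, expressed in fixed-point units
     | | filterSpeckles(disp, /*newVal=*/0, maxSpeckleSize, maxDiff);
     | | @endcode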
2926 | | */ |
2927 | | CV_EXPORTS_W void filterSpeckles( InputOutputArray img, double newVal, |
2928 | | int maxSpeckleSize, double maxDiff, |
2929 | | InputOutputArray buf = noArray() ); |
2930 | | |
2931 | | //! computes valid disparity ROI from the valid ROIs of the rectified images (that are returned by #stereoRectify) |
2932 | | CV_EXPORTS_W Rect getValidDisparityROI( Rect roi1, Rect roi2, |
2933 | | int minDisparity, int numberOfDisparities, |
2934 | | int blockSize ); |
2935 | | |
2936 | | //! validates disparity using the left-right check. The matrix "cost" should be computed by the stereo correspondence algorithm |
2937 | | CV_EXPORTS_W void validateDisparity( InputOutputArray disparity, InputArray cost, |
2938 | | int minDisparity, int numberOfDisparities, |
2939 | | int disp12MaxDisp = 1 ); |
2940 | | |
2941 | | /** @brief Reprojects a disparity image to 3D space. |
2942 | | |
2943 | | @param disparity Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit |
2944 | | floating-point disparity image. The values of 8-bit / 16-bit signed formats are assumed to have no |
2945 | | fractional bits. If the disparity is 16-bit signed format, as computed by @ref StereoBM or |
2946 | | @ref StereoSGBM and maybe other algorithms, it should be divided by 16 (and scaled to float) before |
2947 | | being used here. |
2948 | | @param _3dImage Output 3-channel floating-point image of the same size as disparity. Each element of |
2949 | | _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity map. If one |
2950 | | uses Q obtained by @ref stereoRectify, then the returned points are represented in the first |
2951 | | camera's rectified coordinate system. |
2952 | | @param Q \f$4 \times 4\f$ perspective transformation matrix that can be obtained with |
2953 | | @ref stereoRectify. |
2954 | | @param handleMissingValues Indicates, whether the function should handle missing values (i.e. |
2955 | | points where the disparity was not computed). If handleMissingValues=true, then pixels with the |
2956 | | minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed |
2957 | | to 3D points with a very large Z value (currently set to 10000). |
2958 | | @param ddepth The optional output array depth. If it is -1, the output image will have CV_32F |
2959 | | depth. ddepth can also be set to CV_16S, CV_32S or CV_32F. |
2960 | | |
2961 | | The function transforms a single-channel disparity map to a 3-channel image representing a 3D |
2962 | | surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it |
2963 | | computes: |
2964 | | |
2965 | | \f[\begin{bmatrix} |
2966 | | X \\ |
2967 | | Y \\ |
2968 | | Z \\ |
2969 | | W |
2970 | | \end{bmatrix} = Q \begin{bmatrix} |
2971 | | x \\ |
2972 | | y \\ |
2973 | | \texttt{disparity} (x,y) \\ |
2974 | | 1
2975 | | \end{bmatrix},\f]
     | |
     | | and the 3D coordinates of the pixel (x,y) are then \f$(X/W,\; Y/W,\; Z/W)\f$.
2976 | | |
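     | | A minimal sketch, assuming a fixed-point disparity map disp from StereoSGBM and a matrix Q from
     | | @ref stereoRectify (names are illustrative):
     | | @code
     | | Mat dispF, xyz;
     | | disp.convertTo(dispF, CV_32F, 1.0 / 16.0);   // remove the 4 fractional bits
     | | reprojectImageTo3D(dispF, xyz, Q, /*handleMissingValues=*/true);
     | | Vec3f p = xyz.at<Vec3f>(100, 200);           // 3D point of the pixel at row 100, column 200
     | | @endcode
     | |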
2977 | | @sa |
2978 | | To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform. |
2979 | | */ |
2980 | | CV_EXPORTS_W void reprojectImageTo3D( InputArray disparity, |
2981 | | OutputArray _3dImage, InputArray Q, |
2982 | | bool handleMissingValues = false, |
2983 | | int ddepth = -1 ); |
2984 | | |
2985 | | /** @brief Calculates the Sampson Distance between two points. |
2986 | | |
2987 | | The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as: |
2988 | | \f[ |
2989 | | sd( \texttt{pt1} , \texttt{pt2} )= |
2990 | | \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2} |
2991 | | {((\texttt{F} \cdot \texttt{pt1})(0))^2 + |
2992 | | ((\texttt{F} \cdot \texttt{pt1})(1))^2 + |
2993 | | ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 + |
2994 | | ((\texttt{F}^t \cdot \texttt{pt2})(1))^2} |
2995 | | \f] |
2996 | | The fundamental matrix may be calculated using the #findFundamentalMat function. See @cite HartleyZ00 11.4.3 for details. |
2997 | | @param pt1 first homogeneous 2d point |
2998 | | @param pt2 second homogeneous 2d point |
2999 | | @param F fundamental matrix |
3000 | | @return The computed Sampson distance. |
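     | |
     | | A minimal sketch with homogeneous 2D points (the coordinates are illustrative; F is assumed to
     | | be a CV_64F fundamental matrix):
     | | @code
     | | Mat pt1 = (Mat_<double>(3, 1) << 120.0, 45.0, 1.0);
     | | Mat pt2 = (Mat_<double>(3, 1) << 118.5, 44.2, 1.0);
     | | double err = sampsonDistance(pt1, pt2, F);
     | | @endcode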
3001 | | */ |
3002 | | CV_EXPORTS_W double sampsonDistance(InputArray pt1, InputArray pt2, InputArray F); |
3003 | | |
3004 | | /** @brief Computes an optimal affine transformation between two 3D point sets. |
3005 | | |
3006 | | It computes |
3007 | | \f[ |
3008 | | \begin{bmatrix} |
3009 | | x\\ |
3010 | | y\\ |
3011 | | z\\ |
3012 | | \end{bmatrix} |
3013 | | = |
3014 | | \begin{bmatrix} |
3015 | | a_{11} & a_{12} & a_{13}\\ |
3016 | | a_{21} & a_{22} & a_{23}\\ |
3017 | | a_{31} & a_{32} & a_{33}\\ |
3018 | | \end{bmatrix} |
3019 | | \begin{bmatrix} |
3020 | | X\\ |
3021 | | Y\\ |
3022 | | Z\\ |
3023 | | \end{bmatrix} |
3024 | | + |
3025 | | \begin{bmatrix} |
3026 | | b_1\\ |
3027 | | b_2\\ |
3028 | | b_3\\ |
3029 | | \end{bmatrix} |
3030 | | \f] |
3031 | | |
3032 | | @param src First input 3D point set containing \f$(X,Y,Z)\f$. |
3033 | | @param dst Second input 3D point set containing \f$(x,y,z)\f$. |
3034 | | @param out Output 3D affine transformation matrix \f$3 \times 4\f$ of the form |
3035 | | \f[ |
3036 | | \begin{bmatrix} |
3037 | | a_{11} & a_{12} & a_{13} & b_1\\ |
3038 | | a_{21} & a_{22} & a_{23} & b_2\\ |
3039 | | a_{31} & a_{32} & a_{33} & b_3\\ |
3040 | | \end{bmatrix} |
3041 | | \f] |
3042 | | @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier). |
3043 | | @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as |
3044 | | an inlier. |
3045 | | @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything |
3046 | | between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation |
3047 | | significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. |
3048 | | |
3049 | | The function estimates an optimal 3D affine transformation between two 3D point sets using the |
3050 | | RANSAC algorithm. |
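     | |
     | | A minimal sketch (the point sets are assumed to be filled with matched 3D points):
     | | @code
     | | std::vector<Point3f> src, dst;   // matched 3D point sets
     | | Mat affine;                      // output 3x4 [A|b] matrix
     | | std::vector<uchar> inliers;
     | | int ok = estimateAffine3D(src, dst, affine, inliers, 3.0, 0.99);
     | | // ok != 0 on success; inliers[i] == 1 marks correspondences kept by RANSAC.
     | | @endcode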
3051 | | */ |
3052 | | CV_EXPORTS_W int estimateAffine3D(InputArray src, InputArray dst, |
3053 | | OutputArray out, OutputArray inliers, |
3054 | | double ransacThreshold = 3, double confidence = 0.99); |
3055 | | |
3056 | | /** @brief Computes an optimal affine transformation between two 3D point sets. |
3057 | | |
3058 | | It computes \f$R,s,t\f$ minimizing \f$\sum_{i} \| dst_i - (s \cdot R \cdot src_i + t) \|^2\f$,
3059 | | where \f$R\f$ is a 3x3 rotation matrix, \f$t\f$ is a 3x1 translation vector and \f$s\f$ is a
3060 | | scalar size value. This is an implementation of the algorithm by Umeyama \cite umeyama1991least .
3061 | | The estimated transform is a similarity transform (rotation, translation and uniform scale), a
3062 | | subclass of affine transformations with 7 degrees of freedom. The paired point sets need to
3063 | | comprise at least 3 points each.
3064 | | |
3065 | | @param src First input 3D point set. |
3066 | | @param dst Second input 3D point set. |
3067 | | @param scale If null is passed, the scale parameter s will be assumed to be 1.0.
3068 | | Else the pointed-to variable will be set to the optimal scale. |
3069 | | @param force_rotation If true, the returned rotation will never be a reflection. |
3070 | | This might be unwanted, e.g. when optimizing a transform between a right- and a |
3071 | | left-handed coordinate system. |
3072 | | @return 3D affine transformation matrix \f$3 \times 4\f$ of the form |
3073 | | \f[T = |
3074 | | \begin{bmatrix} |
3075 | | R & t\\ |
3076 | | \end{bmatrix} |
3077 | | \f] |
3078 | | |
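     | | A minimal sketch (the point sets are assumed to be filled with at least 3 paired 3D points):
     | | @code
     | | std::vector<Point3d> src, dst;
     | | double scale = 1.0;
     | | Mat T = estimateAffine3D(src, dst, &scale, /*force_rotation=*/true); // 3x4 [R|t]
     | | @endcode
     | |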
3079 | | */ |
3080 | | CV_EXPORTS_W cv::Mat estimateAffine3D(InputArray src, InputArray dst, |
3081 | | CV_OUT double* scale = nullptr, bool force_rotation = true); |
3082 | | |
3083 | | /** @brief Computes an optimal translation between two 3D point sets. |
3084 | | * |
3085 | | * It computes |
3086 | | * \f[ |
3087 | | * \begin{bmatrix} |
3088 | | * x\\ |
3089 | | * y\\ |
3090 | | * z\\ |
3091 | | * \end{bmatrix} |
3092 | | * = |
3093 | | * \begin{bmatrix} |
3094 | | * X\\ |
3095 | | * Y\\ |
3096 | | * Z\\ |
3097 | | * \end{bmatrix} |
3098 | | * + |
3099 | | * \begin{bmatrix} |
3100 | | * b_1\\ |
3101 | | * b_2\\ |
3102 | | * b_3\\ |
3103 | | * \end{bmatrix} |
3104 | | * \f] |
3105 | | * |
3106 | | * @param src First input 3D point set containing \f$(X,Y,Z)\f$. |
3107 | | * @param dst Second input 3D point set containing \f$(x,y,z)\f$. |
3108 | | * @param out Output 3D translation vector \f$3 \times 1\f$ of the form |
3109 | | * \f[ |
3110 | | * \begin{bmatrix} |
3111 | | * b_1 \\ |
3112 | | * b_2 \\ |
3113 | | * b_3 \\ |
3114 | | * \end{bmatrix} |
3115 | | * \f] |
3116 | | * @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier). |
3117 | | * @param ransacThreshold Maximum reprojection error in the RANSAC algorithm to consider a point as |
3118 | | * an inlier. |
3119 | | * @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything |
3120 | | * between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation |
3121 | | * significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. |
3122 | | * |
3123 | | * The function estimates an optimal 3D translation between two 3D point sets using the |
3124 | | * RANSAC algorithm. |
3125 | | * */ |
3126 | | CV_EXPORTS_W int estimateTranslation3D(InputArray src, InputArray dst, |
3127 | | OutputArray out, OutputArray inliers, |
3128 | | double ransacThreshold = 3, double confidence = 0.99); |
3129 | | |
3130 | | /** @brief Computes an optimal affine transformation between two 2D point sets. |
3131 | | |
3132 | | It computes |
3133 | | \f[ |
3134 | | \begin{bmatrix} |
3135 | | x\\ |
3136 | | y\\ |
3137 | | \end{bmatrix} |
3138 | | = |
3139 | | \begin{bmatrix} |
3140 | | a_{11} & a_{12}\\ |
3141 | | a_{21} & a_{22}\\ |
3142 | | \end{bmatrix} |
3143 | | \begin{bmatrix} |
3144 | | X\\ |
3145 | | Y\\ |
3146 | | \end{bmatrix} |
3147 | | + |
3148 | | \begin{bmatrix} |
3149 | | b_1\\ |
3150 | | b_2\\ |
3151 | | \end{bmatrix} |
3152 | | \f] |
3153 | | |
3154 | | @param from First input 2D point set containing \f$(X,Y)\f$. |
3155 | | @param to Second input 2D point set containing \f$(x,y)\f$. |
3156 | | @param inliers Output vector indicating which points are inliers (1-inlier, 0-outlier). |
3157 | | @param method Robust method used to compute transformation. The following methods are possible: |
3158 | | - @ref RANSAC - RANSAC-based robust method |
3159 | | - @ref LMEDS - Least-Median robust method |
3160 | | RANSAC is the default method. |
3161 | | @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider |
3162 | | a point as an inlier. Applies only to RANSAC. |
3163 | | @param maxIters The maximum number of robust method iterations. |
3164 | | @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything |
3165 | | between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation |
3166 | | significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. |
3167 | | @param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt). |
3168 | | Passing 0 will disable refining, so the output matrix will be the output of the robust method.
3169 | | |
3170 | | @return Output 2D affine transformation matrix \f$2 \times 3\f$ or empty matrix if transformation |
3171 | | could not be estimated. The returned matrix has the following form: |
3172 | | \f[ |
3173 | | \begin{bmatrix} |
3174 | | a_{11} & a_{12} & b_1\\ |
3175 | | a_{21} & a_{22} & b_2\\ |
3176 | | \end{bmatrix} |
3177 | | \f] |
3178 | | |
3179 | | The function estimates an optimal 2D affine transformation between two 2D point sets using the |
3180 | | selected robust algorithm. |
3181 | | |
3182 | | The computed transformation is then refined further (using only inliers) with the |
3183 | | Levenberg-Marquardt method to reduce the re-projection error even more. |
3184 | | |
3185 | | @note |
3186 | | The RANSAC method can handle practically any ratio of outliers but needs a threshold to |
3187 | | distinguish inliers from outliers. The method LMeDS does not need any threshold but it works |
3188 | | correctly only when there are more than 50% of inliers. |
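     | |
     | | A minimal sketch (assuming matched point vectors and an image img; warpAffine from the imgproc
     | | module is used to apply the result):
     | | @code
     | | std::vector<Point2f> from, to;   // matched 2D point sets
     | | std::vector<uchar> inliers;
     | | Mat A = estimateAffine2D(from, to, inliers, RANSAC, 3.0);
     | | Mat warped;
     | | if (!A.empty())
     | |     warpAffine(img, warped, A, img.size());  // apply the 2x3 transform
     | | @endcode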
3189 | | |
3190 | | @sa estimateAffinePartial2D, getAffineTransform |
3191 | | */ |
3192 | | CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray from, InputArray to, OutputArray inliers = noArray(), |
3193 | | int method = RANSAC, double ransacReprojThreshold = 3, |
3194 | | size_t maxIters = 2000, double confidence = 0.99, |
3195 | | size_t refineIters = 10); |
3196 | | |
3197 | | |
3198 | | CV_EXPORTS_W cv::Mat estimateAffine2D(InputArray pts1, InputArray pts2, OutputArray inliers, |
3199 | | const UsacParams ¶ms); |
3200 | | |
3201 | | /** @brief Computes an optimal limited affine transformation with 4 degrees of freedom between |
3202 | | two 2D point sets. |
3203 | | |
3204 | | @param from First input 2D point set. |
3205 | | @param to Second input 2D point set. |
3206 | | @param inliers Output vector indicating which points are inliers. |
3207 | | @param method Robust method used to compute transformation. The following methods are possible: |
3208 | | - @ref RANSAC - RANSAC-based robust method |
3209 | | - @ref LMEDS - Least-Median robust method |
3210 | | RANSAC is the default method. |
3211 | | @param ransacReprojThreshold Maximum reprojection error in the RANSAC algorithm to consider |
3212 | | a point as an inlier. Applies only to RANSAC. |
3213 | | @param maxIters The maximum number of robust method iterations. |
3214 | | @param confidence Confidence level, between 0 and 1, for the estimated transformation. Anything |
3215 | | between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation |
3216 | | significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation. |
3217 | | @param refineIters Maximum number of iterations of refining algorithm (Levenberg-Marquardt). |
3218 | | Passing 0 will disable refining, so the output matrix will be the output of the robust method.
3219 | | |
3220 | | @return Output 2D affine transformation (4 degrees of freedom) matrix \f$2 \times 3\f$ or |
3221 | | empty matrix if transformation could not be estimated. |
3222 | | |
3223 | | The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to |
3224 | | combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust |
3225 | | estimation. |
3226 | | |
3227 | | The computed transformation is then refined further (using only inliers) with the |
3228 | | Levenberg-Marquardt method to reduce the re-projection error even more. |
3229 | | |
3230 | | The estimated transformation matrix is:
3231 | | \f[ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\ |
3232 | | \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y |
3233 | | \end{bmatrix} \f] |
3234 | | Where \f$ \theta \f$ is the rotation angle, \f$ s \f$ the scaling factor and \f$ t_x, t_y \f$ are |
3235 | | translations in \f$ x, y \f$ axes respectively. |
3236 | | |
3237 | | @note |
3238 | | The RANSAC method can handle practically any ratio of outliers but needs a threshold to
3239 | | distinguish inliers from outliers. The method LMeDS does not need any threshold but it works |
3240 | | correctly only when there are more than 50% of inliers. |
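     | |
     | | A minimal sketch that also recovers \f$ \theta \f$ and \f$ s \f$ from the returned matrix (the
     | | point sets are assumed to be filled):
     | | @code
     | | std::vector<Point2f> from, to;              // matched 2D point sets
     | | Mat M = estimateAffinePartial2D(from, to);  // CV_64F, 2x3
     | | if (!M.empty())
     | | {
     | |     double theta = std::atan2(M.at<double>(1, 0), M.at<double>(0, 0));
     | |     double s     = std::hypot(M.at<double>(0, 0), M.at<double>(1, 0));
     | | }
     | | @endcode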
3241 | | |
3242 | | @sa estimateAffine2D, getAffineTransform |
3243 | | */ |
3244 | | CV_EXPORTS_W cv::Mat estimateAffinePartial2D(InputArray from, InputArray to, OutputArray inliers = noArray(), |
3245 | | int method = RANSAC, double ransacReprojThreshold = 3, |
3246 | | size_t maxIters = 2000, double confidence = 0.99, |
3247 | | size_t refineIters = 10); |
3248 | | |
3249 | | /** @example samples/cpp/tutorial_code/features2D/Homography/decompose_homography.cpp |
3250 | | An example program with homography decomposition. |
3251 | | |
3252 | | Check @ref tutorial_homography "the corresponding tutorial" for more details. |
3253 | | */ |
3254 | | |
3255 | | /** @brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s). |
3256 | | |
3257 | | @param H The input homography matrix between two images. |
3258 | | @param K The input camera intrinsic matrix. |
3259 | | @param rotations Array of rotation matrices. |
3260 | | @param translations Array of translation matrices. |
3261 | | @param normals Array of plane normal matrices. |
3262 | | |
3263 | | This function extracts relative camera motion between two views of a planar object and returns up to |
3264 | | four mathematical solution tuples of rotation, translation, and plane normal. The decomposition of |
3265 | | the homography matrix H is described in detail in @cite Malis2007. |
3266 | | |
3267 | | If the homography H, induced by the plane, gives the constraint |
3268 | | \f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f] on the source image points |
3269 | | \f$p_i\f$ and the destination image points \f$p'_i\f$, then the tuple of rotations[k] and |
3270 | | translations[k] is a change of basis from the source camera's coordinate system to the destination |
3271 | | camera's coordinate system. However, by decomposing H, one can only get the translation normalized |
3272 | | by the (typically unknown) depth of the scene, i.e. its direction but with normalized length. |
3273 | | |
3274 | | If point correspondences are available, at least two solutions may further be invalidated by
3275 | | applying a positive depth constraint, i.e. all points must be in front of the camera.
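     | |
     | | A minimal sketch (assuming H and K exist) that enumerates the candidate solutions:
     | | @code
     | | std::vector<Mat> Rs, ts, normals;
     | | int n = decomposeHomographyMat(H, K, Rs, ts, normals);
     | | for (int i = 0; i < n; i++)
     | | {
     | |     // Each (Rs[i], ts[i], normals[i]) is one candidate tuple; use point
     | |     // correspondences, e.g. #filterHomographyDecompByVisibleRefpoints, to
     | |     // reject the physically impossible ones.
     | | }
     | | @endcode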
3276 | | */ |
3277 | | CV_EXPORTS_W int decomposeHomographyMat(InputArray H, |
3278 | | InputArray K, |
3279 | | OutputArrayOfArrays rotations, |
3280 | | OutputArrayOfArrays translations, |
3281 | | OutputArrayOfArrays normals); |
3282 | | |
3283 | | /** @brief Filters homography decompositions based on additional information. |
3284 | | |
3285 | | @param rotations Vector of rotation matrices. |
3286 | | @param normals Vector of plane normal matrices. |
3287 | | @param beforePoints Vector of (rectified) visible reference points before the homography is applied |
3288 | | @param afterPoints Vector of (rectified) visible reference points after the homography is applied |
3289 | | @param possibleSolutions Vector of int indices representing the viable solution set after filtering |
3290 | | @param pointsMask optional Mat/Vector of 8u type representing the mask for the inliers as given by the #findHomography function |
3291 | | |
3292 | | This function is intended to filter the output of the #decomposeHomographyMat based on additional |
3293 | | information as described in @cite Malis2007 . The summary of the method: the #decomposeHomographyMat function |
3294 | | returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the |
3295 | | sets of points visible in the camera frame before and after the homography transformation is applied, |
3296 | | we can determine which are the true potential solutions and which are the opposites by verifying which |
3297 | | homographies are consistent with all visible reference points being in front of the camera. The inputs |
3298 | | are left unchanged; the filtered solution set is returned as indices into the existing one. |
3299 | | |
3300 | | */ |
3301 | | CV_EXPORTS_W void filterHomographyDecompByVisibleRefpoints(InputArrayOfArrays rotations, |
3302 | | InputArrayOfArrays normals, |
3303 | | InputArray beforePoints, |
3304 | | InputArray afterPoints, |
3305 | | OutputArray possibleSolutions, |
3306 | | InputArray pointsMask = noArray()); |
3307 | | |
3308 | | /** @brief The base class for stereo correspondence algorithms. |
3309 | | */ |
3310 | | class CV_EXPORTS_W StereoMatcher : public Algorithm |
3311 | | { |
3312 | | public: |
3313 | | enum { DISP_SHIFT = 4, |
3314 | | DISP_SCALE = (1 << DISP_SHIFT) |
3315 | | }; |
3316 | | |
3317 | | /** @brief Computes disparity map for the specified stereo pair |
3318 | | |
3319 | | @param left Left 8-bit single-channel image. |
3320 | | @param right Right image of the same size and the same type as the left one. |
3321 | | @param disparity Output disparity map. It has the same size as the input images. Some algorithms, |
3322 | | like StereoBM or StereoSGBM compute 16-bit fixed-point disparity map (where each disparity value |
3323 | | has 4 fractional bits), whereas other algorithms output 32-bit floating-point disparity map. |
3324 | | */ |
3325 | | CV_WRAP virtual void compute( InputArray left, InputArray right, |
3326 | | OutputArray disparity ) = 0; |
3327 | | |
3328 | | CV_WRAP virtual int getMinDisparity() const = 0; |
3329 | | CV_WRAP virtual void setMinDisparity(int minDisparity) = 0; |
3330 | | |
3331 | | CV_WRAP virtual int getNumDisparities() const = 0; |
3332 | | CV_WRAP virtual void setNumDisparities(int numDisparities) = 0; |
3333 | | |
3334 | | CV_WRAP virtual int getBlockSize() const = 0; |
3335 | | CV_WRAP virtual void setBlockSize(int blockSize) = 0; |
3336 | | |
3337 | | CV_WRAP virtual int getSpeckleWindowSize() const = 0; |
3338 | | CV_WRAP virtual void setSpeckleWindowSize(int speckleWindowSize) = 0; |
3339 | | |
3340 | | CV_WRAP virtual int getSpeckleRange() const = 0; |
3341 | | CV_WRAP virtual void setSpeckleRange(int speckleRange) = 0; |
3342 | | |
3343 | | CV_WRAP virtual int getDisp12MaxDiff() const = 0; |
3344 | | CV_WRAP virtual void setDisp12MaxDiff(int disp12MaxDiff) = 0; |
3345 | | }; |
3346 | | |
3347 | | |
3348 | | /** @brief Class for computing stereo correspondence using the block matching algorithm, introduced and |
3349 | | contributed to OpenCV by K. Konolige. |
3350 | | */ |
3351 | | class CV_EXPORTS_W StereoBM : public StereoMatcher |
3352 | | { |
3353 | | public: |
3354 | | enum { PREFILTER_NORMALIZED_RESPONSE = 0, |
3355 | | PREFILTER_XSOBEL = 1 |
3356 | | }; |
3357 | | |
3358 | | CV_WRAP virtual int getPreFilterType() const = 0; |
3359 | | CV_WRAP virtual void setPreFilterType(int preFilterType) = 0; |
3360 | | |
3361 | | CV_WRAP virtual int getPreFilterSize() const = 0; |
3362 | | CV_WRAP virtual void setPreFilterSize(int preFilterSize) = 0; |
3363 | | |
3364 | | CV_WRAP virtual int getPreFilterCap() const = 0; |
3365 | | CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0; |
3366 | | |
3367 | | CV_WRAP virtual int getTextureThreshold() const = 0; |
3368 | | CV_WRAP virtual void setTextureThreshold(int textureThreshold) = 0; |
3369 | | |
3370 | | CV_WRAP virtual int getUniquenessRatio() const = 0; |
3371 | | CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0; |
3372 | | |
3373 | | CV_WRAP virtual int getSmallerBlockSize() const = 0; |
3374 | | CV_WRAP virtual void setSmallerBlockSize(int blockSize) = 0; |
3375 | | |
3376 | | CV_WRAP virtual Rect getROI1() const = 0; |
3377 | | CV_WRAP virtual void setROI1(Rect roi1) = 0; |
3378 | | |
3379 | | CV_WRAP virtual Rect getROI2() const = 0; |
3380 | | CV_WRAP virtual void setROI2(Rect roi2) = 0; |
3381 | | |
3382 | | /** @brief Creates StereoBM object |
3383 | | |
3384 | | @param numDisparities the disparity search range. For each pixel algorithm will find the best |
3385 | | disparity from 0 (default minimum disparity) to numDisparities. The search range can then be |
3386 | | shifted by changing the minimum disparity. |
3387 | | @param blockSize the linear size of the blocks compared by the algorithm. The size should be odd |
3388 | | (as the block is centered at the current pixel). Larger block size implies smoother, though less |
3389 | | accurate disparity map. Smaller block size gives more detailed disparity map, but there is higher |
3390 | | chance for algorithm to find a wrong correspondence. |
3391 | | |
3392 | | The function creates a StereoBM object. You can then call StereoBM::compute() to compute disparity for
3393 | | a specific stereo pair. |
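     | |
     | | A minimal sketch (assuming two rectified 8-bit grayscale images; the parameter values are
     | | illustrative):
     | | @code
     | | Ptr<StereoBM> bm = StereoBM::create(/*numDisparities=*/64, /*blockSize=*/15);
     | | Mat dispFixed, disp;
     | | bm->compute(leftGray, rightGray, dispFixed);    // CV_16S, disparities scaled by 16
     | | dispFixed.convertTo(disp, CV_32F, 1.0 / 16.0);  // true disparity values
     | | @endcode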
3394 | | */ |
3395 | | CV_WRAP static Ptr<StereoBM> create(int numDisparities = 0, int blockSize = 21); |
3396 | | }; |
3397 | | |
3398 | | /** @brief The class implements the modified H. Hirschmuller algorithm @cite HH08 that differs from the original |
3399 | | one as follows: |
3400 | | |
3401 | | - By default, the algorithm is single-pass, which means that you consider only 5 directions |
3402 | | instead of 8. Set mode=StereoSGBM::MODE_HH in createStereoSGBM to run the full variant of the |
3403 | | algorithm but beware that it may consume a lot of memory. |
3404 | | - The algorithm matches blocks, not individual pixels. However, setting blockSize=1 reduces the
3405 | | blocks to single pixels.
3406 | | - The mutual information cost function is not implemented. Instead, a simpler Birchfield-Tomasi
3407 | | sub-pixel metric from @cite BT98 is used. Color images are supported as well.
3408 | | - Some pre- and post- processing steps from K. Konolige algorithm StereoBM are included, for |
3409 | | example: pre-filtering (StereoBM::PREFILTER_XSOBEL type) and post-filtering (uniqueness |
3410 | | check, quadratic interpolation and speckle filtering). |
3411 | | |
3412 | | @note |
3413 | | - (Python) An example illustrating the use of the StereoSGBM matching algorithm can be found |
3414 | | at opencv_source_code/samples/python/stereo_match.py |
3415 | | */ |
3416 | | class CV_EXPORTS_W StereoSGBM : public StereoMatcher |
3417 | | { |
3418 | | public: |
3419 | | enum |
3420 | | { |
3421 | | MODE_SGBM = 0, |
3422 | | MODE_HH = 1, |
3423 | | MODE_SGBM_3WAY = 2, |
3424 | | MODE_HH4 = 3 |
3425 | | }; |
3426 | | |
3427 | | CV_WRAP virtual int getPreFilterCap() const = 0; |
3428 | | CV_WRAP virtual void setPreFilterCap(int preFilterCap) = 0; |
3429 | | |
3430 | | CV_WRAP virtual int getUniquenessRatio() const = 0; |
3431 | | CV_WRAP virtual void setUniquenessRatio(int uniquenessRatio) = 0; |
3432 | | |
3433 | | CV_WRAP virtual int getP1() const = 0; |
3434 | | CV_WRAP virtual void setP1(int P1) = 0; |
3435 | | |
3436 | | CV_WRAP virtual int getP2() const = 0; |
3437 | | CV_WRAP virtual void setP2(int P2) = 0; |
3438 | | |
3439 | | CV_WRAP virtual int getMode() const = 0; |
3440 | | CV_WRAP virtual void setMode(int mode) = 0; |
3441 | | |
3442 | | /** @brief Creates StereoSGBM object |
3443 | | |
3444 | | @param minDisparity Minimum possible disparity value. Normally, it is zero but sometimes |
3445 | | rectification algorithms can shift images, so this parameter needs to be adjusted accordingly. |
3446 | | @param numDisparities Maximum disparity minus minimum disparity. The value is always greater than |
3447 | | zero. In the current implementation, this parameter must be divisible by 16. |
3448 | | @param blockSize Matched block size. It must be an odd number \>=1 . Normally, it should be |
3449 | | somewhere in the 3..11 range. |
3450 | | @param P1 The first parameter controlling the disparity smoothness. See below. |
3451 | | @param P2 The second parameter controlling the disparity smoothness. The larger the values are, |
3452 | | the smoother the disparity is. P1 is the penalty on the disparity change by plus or minus 1 |
3453 | | between neighbor pixels. P2 is the penalty on the disparity change by more than 1 between neighbor |
3454 | | pixels. The algorithm requires P2 \> P1 . See stereo_match.cpp sample where some reasonably good |
3455 | | P1 and P2 values are shown (like 8\*number_of_image_channels\*blockSize\*blockSize and |
3456 | | 32\*number_of_image_channels\*blockSize\*blockSize , respectively). |
3457 | | @param disp12MaxDiff Maximum allowed difference (in integer pixel units) in the left-right |
3458 | | disparity check. Set it to a non-positive value to disable the check. |
3459 | | @param preFilterCap Truncation value for the prefiltered image pixels. The algorithm first |
3460 | | computes x-derivative at each pixel and clips its value by [-preFilterCap, preFilterCap] interval. |
3461 | | The result values are passed to the Birchfield-Tomasi pixel cost function. |
3462 | | @param uniquenessRatio Margin in percentage by which the best (minimum) computed cost function |
3463 | | value should "win" the second best value to consider the found match correct. Normally, a value |
3464 | | within the 5-15 range is good enough. |
3465 | | @param speckleWindowSize Maximum size of smooth disparity regions to consider their noise speckles |
3466 | | and invalidate. Set it to 0 to disable speckle filtering. Otherwise, set it somewhere in the |
3467 | | 50-200 range. |
3468 | | @param speckleRange Maximum disparity variation within each connected component. If you do speckle |
3469 | | filtering, set the parameter to a positive value, it will be implicitly multiplied by 16. |
3470 | | Normally, 1 or 2 is good enough. |
3471 | | @param mode Set it to StereoSGBM::MODE_HH to run the full-scale two-pass dynamic programming |
3472 | | algorithm. It will consume O(W\*H\*numDisparities) bytes, which is large for 640x480 stereo and |
3473 | | huge for HD-size pictures. By default, it is set to StereoSGBM::MODE_SGBM.
3474 | | |
3475 | | The default values initialize StereoSGBM with reasonable parameters, so you only have to set
3476 | | StereoSGBM::numDisparities at minimum. Every other parameter can be set to a custom value if
3477 | | needed.
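     | |
     | | A minimal sketch using the P1/P2 heuristic mentioned above (rectified grayscale inputs are
     | | assumed; the remaining values are illustrative):
     | | @code
     | | int blockSize = 5, cn = 1;  // cn = number of image channels
     | | Ptr<StereoSGBM> sgbm = StereoSGBM::create(0, 128, blockSize,
     | |                                           8 * cn * blockSize * blockSize,   // P1
     | |                                           32 * cn * blockSize * blockSize,  // P2
     | |                                           1, 63, 10, 100, 32, StereoSGBM::MODE_SGBM);
     | | Mat disp;
     | | sgbm->compute(leftGray, rightGray, disp);       // CV_16S, disparities scaled by 16
     | | @endcode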
3478 | | */ |
3479 | | CV_WRAP static Ptr<StereoSGBM> create(int minDisparity = 0, int numDisparities = 16, int blockSize = 3, |
3480 | | int P1 = 0, int P2 = 0, int disp12MaxDiff = 0, |
3481 | | int preFilterCap = 0, int uniquenessRatio = 0, |
3482 | | int speckleWindowSize = 0, int speckleRange = 0, |
3483 | | int mode = StereoSGBM::MODE_SGBM); |
3484 | | }; |
3485 | | |
3486 | | |
3487 | | //! cv::undistort mode |
3488 | | enum UndistortTypes |
3489 | | { |
3490 | | PROJ_SPHERICAL_ORTHO = 0, |
3491 | | PROJ_SPHERICAL_EQRECT = 1 |
3492 | | }; |
3493 | | |
3494 | | /** @brief Transforms an image to compensate for lens distortion. |
3495 | | |
3496 | | The function transforms an image to compensate for radial and tangential lens distortion.
3497 | | |
3498 | | The function is simply a combination of #initUndistortRectifyMap (with unity R ) and #remap |
3499 | | (with bilinear interpolation). See the former function for details of the transformation being |
3500 | | performed. |
3501 | | |
3502 | | Those pixels in the destination image for which there are no corresponding pixels in the source
3503 | | image are filled with zeros (black color).
3504 | | |
3505 | | A particular subset of the source image that will be visible in the corrected image can be regulated |
3506 | | by newCameraMatrix. You can use #getOptimalNewCameraMatrix to compute the appropriate |
3507 | | newCameraMatrix depending on your requirements. |
3508 | | |
3509 | | The camera matrix and the distortion parameters can be determined using #calibrateCamera. If |
3510 | | the resolution of images is different from the resolution used at the calibration stage, \f$f_x, |
3511 | | f_y, c_x\f$ and \f$c_y\f$ need to be scaled accordingly, while the distortion coefficients remain |
3512 | | the same. |
3513 | | |
3514 | | @param src Input (distorted) image. |
3515 | | @param dst Output (corrected) image that has the same size and type as src . |
3516 | | @param cameraMatrix Input camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
3517 | | @param distCoeffs Input vector of distortion coefficients |
3518 | | \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
3519 | | of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. |
3520 | | @param newCameraMatrix Camera matrix of the distorted image. By default, it is the same as |
3521 | | cameraMatrix but you may additionally scale and shift the result by using a different matrix. |
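     | |
     | | A minimal sketch (assuming cameraMatrix and distCoeffs come from a prior #calibrateCamera run
     | | and img is the distorted input image):
     | | @code
     | | Mat newK = getOptimalNewCameraMatrix(cameraMatrix, distCoeffs, img.size(), /*alpha=*/1.0);
     | | Mat undistorted;
     | | undistort(img, undistorted, cameraMatrix, distCoeffs, newK);
     | | @endcode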
3522 | | */ |
3523 | | CV_EXPORTS_W void undistort( InputArray src, OutputArray dst, |
3524 | | InputArray cameraMatrix, |
3525 | | InputArray distCoeffs, |
3526 | | InputArray newCameraMatrix = noArray() ); |
3527 | | |
3528 | | /** @brief Computes the undistortion and rectification transformation map. |
3529 | | |
3530 | | The function computes the joint undistortion and rectification transformation and represents the |
3531 | | result in the form of maps for #remap. The undistorted image looks like the original, as if it is
3532 | | captured with a camera using the camera matrix =newCameraMatrix and zero distortion. In case of a |
3533 | | monocular camera, newCameraMatrix is usually equal to cameraMatrix, or it can be computed by |
3534 | | #getOptimalNewCameraMatrix for a better control over scaling. In case of a stereo camera, |
3535 | | newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify . |
3536 | | |
3537 | | Also, this new camera is oriented differently in the coordinate space, according to R. That, for |
3538 | | example, helps to align two heads of a stereo camera so that the epipolar lines on both images |
3539 | | become horizontal and have the same y- coordinate (in case of a horizontally aligned stereo camera). |
3540 | | |
3541 | | The function actually builds the maps for the inverse mapping algorithm that is used by #remap. That |
3542 | | is, for each pixel \f$(u, v)\f$ in the destination (corrected and rectified) image, the function |
3543 | | computes the corresponding coordinates in the source image (that is, in the original image from |
3544 | | camera). The following process is applied: |
3545 | | \f[ |
3546 | | \begin{array}{l} |
3547 | | x \leftarrow (u - {c'}_x)/{f'}_x \\ |
3548 | | y \leftarrow (v - {c'}_y)/{f'}_y \\ |
3549 | | {[X\,Y\,W]} ^T \leftarrow R^{-1}*[x \, y \, 1]^T \\ |
3550 | | x' \leftarrow X/W \\ |
3551 | | y' \leftarrow Y/W \\ |
3552 | | r^2 \leftarrow x'^2 + y'^2 \\ |
3553 | | x'' \leftarrow x' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} |
3554 | | + 2p_1 x' y' + p_2(r^2 + 2 x'^2) + s_1 r^2 + s_2 r^4\\ |
3555 | | y'' \leftarrow y' \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6} |
3556 | | + p_1 (r^2 + 2 y'^2) + 2 p_2 x' y' + s_3 r^2 + s_4 r^4 \\ |
3557 | | s\vecthree{x'''}{y'''}{1} = |
3558 | | \vecthreethree{R_{33}(\tau_x, \tau_y)}{0}{-R_{13}(\tau_x, \tau_y)}
3559 | | {0}{R_{33}(\tau_x, \tau_y)}{-R_{23}(\tau_x, \tau_y)} |
3560 | | {0}{0}{1} R(\tau_x, \tau_y) \vecthree{x''}{y''}{1}\\ |
3561 | | map_x(u,v) \leftarrow x''' f_x + c_x \\ |
3562 | | map_y(u,v) \leftarrow y''' f_y + c_y |
3563 | | \end{array} |
3564 | | \f] |
3565 | | where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
3566 | | are the distortion coefficients. |
3567 | | |
3568 | | In case of a stereo camera, this function is called twice: once for each camera head, after |
3569 | | #stereoRectify, which in its turn is called after #stereoCalibrate. But if the stereo camera |
3570 | | was not calibrated, it is still possible to compute the rectification transformations directly from |
3571 | | the fundamental matrix using #stereoRectifyUncalibrated. For each camera, the function computes |
3572 | | homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D |
3573 | | space. R can be computed from H as |
3574 | | \f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f] |
3575 | | where cameraMatrix can be chosen arbitrarily. |
3576 | | |
3577 | | @param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
3578 | | @param distCoeffs Input vector of distortion coefficients |
3579 | | \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
3580 | | of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. |
3581 | | @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2 , |
3582 | | computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation |
3583 | | is assumed. In #initUndistortRectifyMap, R is assumed to be an identity matrix.
3584 | | @param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$. |
3585 | | @param size Undistorted image size. |
3586 | | @param m1type Type of the first output map that can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps |
3587 | | @param map1 The first output map. |
3588 | | @param map2 The second output map. |
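     | |
     | | A minimal sketch for one camera head after #stereoRectify (R1 and P1 are computed there; remap
     | | is provided by the imgproc module; names are illustrative):
     | | @code
     | | Mat map1, map2, rectified;
     | | initUndistortRectifyMap(cameraMatrix1, distCoeffs1, R1, P1, imageSize,
     | |                         CV_16SC2, map1, map2);
     | | remap(img1, rectified, map1, map2, INTER_LINEAR);
     | | @endcode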
3589 | | */ |
3590 | | CV_EXPORTS_W |
3591 | | void initUndistortRectifyMap(InputArray cameraMatrix, InputArray distCoeffs, |
3592 | | InputArray R, InputArray newCameraMatrix, |
3593 | | Size size, int m1type, OutputArray map1, OutputArray map2); |
3594 | | |
3595 | | /** @brief Computes the projection and inverse-rectification transformation map. In essence, this is the inverse of
3596 | | #initUndistortRectifyMap to accommodate stereo-rectification of projectors ('inverse-cameras') in projector-camera pairs.
3597 | | |
3598 | | The function computes the joint projection and inverse rectification transformation and represents the |
3599 | | result in the form of maps for #remap. The projected image looks like a distorted version of the original which, |
3600 | | once projected by a projector, should visually match the original. In case of a monocular camera, newCameraMatrix |
3601 | | is usually equal to cameraMatrix, or it can be computed by |
3602 | | #getOptimalNewCameraMatrix for a better control over scaling. In case of a projector-camera pair, |
3603 | | newCameraMatrix is normally set to P1 or P2 computed by #stereoRectify . |
3604 | | |
3605 | | The projector is oriented differently in the coordinate space, according to R. In case of projector-camera pairs, |
3606 | | this helps align the projector (in the same manner as #initUndistortRectifyMap for the camera) to create a stereo-rectified pair. This |
3607 | | allows epipolar lines on both images to become horizontal and have the same y-coordinate (in case of a horizontally aligned projector-camera pair). |
3608 | | |
3609 | | The function builds the maps for the inverse mapping algorithm that is used by #remap. That |
3610 | | is, for each pixel \f$(u, v)\f$ in the destination (projected and inverse-rectified) image, the function |
3611 | | computes the corresponding coordinates in the source image (that is, in the original digital image). The following process is applied: |
3612 | | |
3613 | | \f[ |
3614 | | \begin{array}{l} |
3615 | | \text{newCameraMatrix}\\ |
3616 | | x \leftarrow (u - {c'}_x)/{f'}_x \\ |
3617 | | y \leftarrow (v - {c'}_y)/{f'}_y \\ |
3618 | | |
3619 | | \\\text{Undistortion} |
3620 | | \\\scriptsize{\textit{though equation shown is for radial undistortion, function implements cv::undistortPoints()}}\\ |
3621 | | r^2 \leftarrow x^2 + y^2 \\ |
3622 | | \theta \leftarrow \frac{1 + k_1 r^2 + k_2 r^4 + k_3 r^6}{1 + k_4 r^2 + k_5 r^4 + k_6 r^6}\\ |
3623 | | x' \leftarrow \frac{x}{\theta} \\ |
3624 | | y' \leftarrow \frac{y}{\theta} \\ |
3625 | | |
3626 | | \\\text{Rectification}\\ |
3627 | | {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\ |
3628 | | x'' \leftarrow X/W \\ |
3629 | | y'' \leftarrow Y/W \\ |
3630 | | |
3631 | | \\\text{cameraMatrix}\\ |
3632 | | map_x(u,v) \leftarrow x'' f_x + c_x \\ |
3633 | | map_y(u,v) \leftarrow y'' f_y + c_y |
3634 | | \end{array} |
3635 | | \f] |
3636 | | where \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
3637 | | are the distortion coefficients vector distCoeffs. |
3638 | | |
3639 | | In case of a stereo-rectified projector-camera pair, this function is called for the projector while #initUndistortRectifyMap is called for the camera head. |
3640 | | This is done after #stereoRectify, which in turn is called after #stereoCalibrate. If the projector-camera pair |
3641 | | is not calibrated, it is still possible to compute the rectification transformations directly from |
3642 | | the fundamental matrix using #stereoRectifyUncalibrated. For the projector and camera, the function computes |
3643 | | homography H as the rectification transformation in a pixel domain, not a rotation matrix R in 3D |
3644 | | space. R can be computed from H as |
3645 | | \f[\texttt{R} = \texttt{cameraMatrix} ^{-1} \cdot \texttt{H} \cdot \texttt{cameraMatrix}\f] |
3646 | | where cameraMatrix can be chosen arbitrarily. |
3647 | | |
3648 | | @param cameraMatrix Input camera matrix \f$A=\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
3649 | | @param distCoeffs Input vector of distortion coefficients |
3650 | | \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
3651 | | of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. |
3652 | | @param R Optional rectification transformation in the object space (3x3 matrix). R1 or R2, |
3653 | | computed by #stereoRectify can be passed here. If the matrix is empty, the identity transformation |
3654 | | is assumed. |
3655 | | @param newCameraMatrix New camera matrix \f$A'=\vecthreethree{f_x'}{0}{c_x'}{0}{f_y'}{c_y'}{0}{0}{1}\f$. |
3656 | | @param size Distorted image size. |
3657 | | @param m1type Type of the first output map. Can be CV_32FC1, CV_32FC2 or CV_16SC2, see #convertMaps |
3658 | | @param map1 The first output map for #remap. |
3659 | | @param map2 The second output map for #remap. |
3660 | | */ |
3661 | | CV_EXPORTS_W |
3662 | | void initInverseRectificationMap( InputArray cameraMatrix, InputArray distCoeffs, |
3663 | | InputArray R, InputArray newCameraMatrix, |
3664 | | const Size& size, int m1type, OutputArray map1, OutputArray map2 ); |
3665 | | |
3666 | | //! initializes maps for #remap for wide-angle |
3667 | | CV_EXPORTS |
3668 | | float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs, |
3669 | | Size imageSize, int destImageWidth, |
3670 | | int m1type, OutputArray map1, OutputArray map2, |
3671 | | enum UndistortTypes projType = PROJ_SPHERICAL_EQRECT, double alpha = 0); |
3672 | | static inline |
3673 | | float initWideAngleProjMap(InputArray cameraMatrix, InputArray distCoeffs, |
3674 | | Size imageSize, int destImageWidth, |
3675 | | int m1type, OutputArray map1, OutputArray map2, |
3676 | | int projType, double alpha = 0) |
3677 | 0 | { |
3678 | 0 | return initWideAngleProjMap(cameraMatrix, distCoeffs, imageSize, destImageWidth, |
3679 | 0 | m1type, map1, map2, (UndistortTypes)projType, alpha); |
3680 | 0 | }
3681 | | |
3682 | | /** @brief Returns the default new camera matrix. |
3683 | | |
3684 | | The function returns the camera matrix that is either an exact copy of the input cameraMatrix (when |
3685 | | centerPrincipalPoint=false), or the modified one (when centerPrincipalPoint=true).
3686 | | |
3687 | | In the latter case, the new camera matrix will be: |
3688 | | |
3689 | | \f[\begin{bmatrix} f_x && 0 && ( \texttt{imgSize.width} -1)*0.5 \\ 0 && f_y && ( \texttt{imgSize.height} -1)*0.5 \\ 0 && 0 && 1 \end{bmatrix} ,\f] |
3690 | | |
3691 | | where \f$f_x\f$ and \f$f_y\f$ are \f$(0,0)\f$ and \f$(1,1)\f$ elements of cameraMatrix, respectively. |
3692 | | |
3693 | | By default, the undistortion functions in OpenCV (see #initUndistortRectifyMap, #undistort) do not |
3694 | | move the principal point. However, when you work with stereo, it is important to move the principal |
3695 | | points in both views to the same y-coordinate (which is required by most of stereo correspondence |
3696 | | algorithms), and maybe to the same x-coordinate too. So, you can form the new camera matrix for
3697 | | each view where the principal points are located at the center. |
3698 | | |
3699 | | @param cameraMatrix Input camera matrix. |
3700 | | @param imgsize Camera view image size in pixels. |
3701 | | @param centerPrincipalPoint Location of the principal point in the new camera matrix. The |
3702 | | parameter indicates whether this location should be at the image center or not. |
3703 | | */ |
3704 | | CV_EXPORTS_W |
3705 | | Mat getDefaultNewCameraMatrix(InputArray cameraMatrix, Size imgsize = Size(), |
3706 | | bool centerPrincipalPoint = false); |
3707 | | |
3708 | | /** @brief Computes the ideal point coordinates from the observed point coordinates. |
3709 | | |
3710 | | The function is similar to #undistort and #initUndistortRectifyMap but it operates on a |
3711 | | sparse set of points instead of a raster image. The function also performs a reverse transformation
3712 | | to #projectPoints. In case of a 3D object, it does not reconstruct its 3D coordinates, but for a |
3713 | | planar object, it does, up to a translation vector, if the proper R is specified. |
3714 | | |
3715 | | For each observed point coordinate \f$(u, v)\f$ the function computes: |
3716 | | \f[ |
3717 | | \begin{array}{l} |
3718 | | x^{"} \leftarrow (u - c_x)/f_x \\ |
3719 | | y^{"} \leftarrow (v - c_y)/f_y \\ |
3720 | | (x',y') = undistort(x^{"},y^{"}, \texttt{distCoeffs}) \\ |
3721 | | {[X\,Y\,W]} ^T \leftarrow R*[x' \, y' \, 1]^T \\ |
3722 | | x \leftarrow X/W \\ |
3723 | | y \leftarrow Y/W \\ |
3724 | | \text{only performed if P is specified:} \\ |
3725 | | u' \leftarrow x {f'}_x + {c'}_x \\ |
3726 | | v' \leftarrow y {f'}_y + {c'}_y |
3727 | | \end{array} |
3728 | | \f] |
3729 | | |
3730 | | where *undistort* is an approximate iterative algorithm that estimates the normalized original |
3731 | | point coordinates out of the normalized distorted point coordinates ("normalized" means that the |
3732 | | coordinates do not depend on the camera matrix). |
3733 | | |
3734 | | The function can be used both for a stereo camera head and for a monocular camera (when R is empty).
3735 | | @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or CV_64FC2) (or |
3736 | | vector\<Point2f\> ). |
3737 | | @param dst Output ideal point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ) after undistortion and reverse perspective |
3738 | | transformation. If matrix P is identity or omitted, dst will contain normalized point coordinates. |
3739 | | @param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
3740 | | @param distCoeffs Input vector of distortion coefficients |
3741 | | \f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6[, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ |
3742 | | of 4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are assumed. |
3743 | | @param R Rectification transformation in the object space (3x3 matrix). R1 or R2 computed by |
3744 | | #stereoRectify can be passed here. If the matrix is empty, the identity transformation is used. |
3745 | | @param P New camera matrix (3x3) or new projection matrix (3x4) \f$\begin{bmatrix} {f'}_x & 0 & {c'}_x & t_x \\ 0 & {f'}_y & {c'}_y & t_y \\ 0 & 0 & 1 & t_z \end{bmatrix}\f$. P1 or P2 computed by |
3746 | | #stereoRectify can be passed here. If the matrix is empty, the identity new camera matrix is used. |
3747 | | */ |
3748 | | CV_EXPORTS_W |
3749 | | void undistortPoints(InputArray src, OutputArray dst, |
3750 | | InputArray cameraMatrix, InputArray distCoeffs, |
3751 | | InputArray R = noArray(), InputArray P = noArray()); |
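A short sketch of both calling modes, with made-up intrinsics and distortion coefficients; note how the optional P argument decides between normalized and pixel output:

    #include <opencv2/calib3d.hpp>
    #include <vector>

    int main()
    {
        cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
        cv::Mat dist = (cv::Mat_<double>(1, 5) << -0.2, 0.05, 0, 0, 0); // k1 k2 p1 p2 k3
        std::vector<cv::Point2f> observed = { {100.f, 120.f}, {400.f, 300.f} };

        // P omitted: the output is in normalized coordinates.
        std::vector<cv::Point2f> normalized;
        cv::undistortPoints(observed, normalized, K, dist);

        // P = K: the undistorted points are mapped back to pixel coordinates.
        std::vector<cv::Point2f> pixels;
        cv::undistortPoints(observed, pixels, K, dist, cv::noArray(), K);
        return 0;
    }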
3752 | | /** @overload |
3753 | | @note Default version of #undistortPoints does 5 iterations to compute undistorted points. |
3754 | | */ |
3755 | | CV_EXPORTS_AS(undistortPointsIter) |
3756 | | void undistortPoints(InputArray src, OutputArray dst, |
3757 | | InputArray cameraMatrix, InputArray distCoeffs, |
3758 | | InputArray R, InputArray P, TermCriteria criteria); |
3759 | | |
3760 | | /** |
3761 | | * @brief Computes the positions of undistorted image points
3762 | | *
3763 | | * @param src Observed point coordinates, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel (CV_32FC2 or
3764 | | CV_64FC2) (or vector\<Point2f\> ).
3765 | | * @param dst Output undistorted point coordinates (1xN/Nx1 2-channel or vector\<Point2f\> ).
3766 | | * @param cameraMatrix Camera matrix \f$\vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . |
3767 | | * @param distCoeffs Distortion coefficients |
3768 | | */ |
3769 | | CV_EXPORTS_W |
3770 | | void undistortImagePoints(InputArray src, OutputArray dst, InputArray cameraMatrix, |
3771 | | InputArray distCoeffs, |
3772 | | TermCriteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 5, |
3773 | | 0.01)); |
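A sketch of overriding the unnamed TermCriteria parameter when the default (5 iterations, eps 0.01) is too loose for strong distortion; all numeric values are illustrative:

    #include <opencv2/calib3d.hpp>
    #include <vector>

    int main()
    {
        cv::Mat K = (cv::Mat_<double>(3, 3) << 800, 0, 320, 0, 800, 240, 0, 0, 1);
        cv::Mat dist = (cv::Mat_<double>(1, 4) << -0.2, 0.05, 0, 0); // k1 k2 p1 p2
        std::vector<cv::Point2f> observed = { {100.f, 120.f} }, undistorted;
        // More iterations and a tighter epsilon than the defaults.
        cv::undistortImagePoints(observed, undistorted, K, dist,
            cv::TermCriteria(cv::TermCriteria::MAX_ITER + cv::TermCriteria::EPS, 30, 1e-6));
        return 0;
    }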
3774 | | |
3775 | | //! @} calib3d |
3776 | | |
3777 | | /** @brief The methods in this namespace use a so-called fisheye camera model. |
3778 | | @ingroup calib3d_fisheye |
3779 | | */ |
3780 | | namespace fisheye |
3781 | | { |
3782 | | //! @addtogroup calib3d_fisheye |
3783 | | //! @{ |
3784 | | |
3785 | | enum{ |
3786 | | CALIB_USE_INTRINSIC_GUESS = 1 << 0, |
3787 | | CALIB_RECOMPUTE_EXTRINSIC = 1 << 1, |
3788 | | CALIB_CHECK_COND = 1 << 2, |
3789 | | CALIB_FIX_SKEW = 1 << 3, |
3790 | | CALIB_FIX_K1 = 1 << 4, |
3791 | | CALIB_FIX_K2 = 1 << 5, |
3792 | | CALIB_FIX_K3 = 1 << 6, |
3793 | | CALIB_FIX_K4 = 1 << 7, |
3794 | | CALIB_FIX_INTRINSIC = 1 << 8, |
3795 | | CALIB_FIX_PRINCIPAL_POINT = 1 << 9, |
3796 | | CALIB_ZERO_DISPARITY = 1 << 10, |
3797 | | CALIB_FIX_FOCAL_LENGTH = 1 << 11 |
3798 | | }; |
3799 | | |
3800 | | /** @brief Projects points using the fisheye model
3801 | | |
3802 | | @param objectPoints Array of object points, 1xN/Nx1 3-channel (or vector\<Point3f\> ), where N is |
3803 | | the number of points in the view. |
3804 | | @param imagePoints Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or |
3805 | | vector\<Point2f\>. |
3806 | | @param affine Rigid transformation from the object coordinate space to the camera coordinate space, equivalent to the rvec/tvec pair of the overload below.
3807 | | @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. |
3808 | | @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3809 | | @param alpha The skew coefficient. |
3810 | | @param jacobian Optional output 2Nx15 jacobian matrix of derivatives of image points with respect |
3811 | | to components of the focal lengths, coordinates of the principal point, distortion coefficients, |
3812 | | rotation vector, translation vector, and the skew. In the old interface different components of |
3813 | | the jacobian are returned via different output parameters. |
3814 | | |
3815 | | The function computes projections of 3D points to the image plane given intrinsic and extrinsic |
3816 | | camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of |
3817 | | image points coordinates (as functions of all the input parameters) with respect to the particular |
3818 | | parameters, intrinsic and/or extrinsic. |
3819 | | */ |
3820 | | CV_EXPORTS void projectPoints(InputArray objectPoints, OutputArray imagePoints, const Affine3d& affine, |
3821 | | InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); |
3822 | | |
3823 | | /** @overload */ |
3824 | | CV_EXPORTS_W void projectPoints(InputArray objectPoints, OutputArray imagePoints, InputArray rvec, InputArray tvec, |
3825 | | InputArray K, InputArray D, double alpha = 0, OutputArray jacobian = noArray()); |
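A brief sketch of the rvec/tvec overload; the intrinsics, distortion coefficients, and 3D points are arbitrary placeholders:

    #include <opencv2/calib3d.hpp>
    #include <vector>

    int main()
    {
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.003, 0.0);               // fisheye k1..k4
        std::vector<cv::Point3d> obj = { {0.0, 0.0, 1.0}, {0.1, -0.1, 1.2} };
        std::vector<cv::Point2d> img;
        cv::Vec3d rvec(0, 0, 0), tvec(0, 0, 0);             // camera frame == object frame
        cv::fisheye::projectPoints(obj, img, rvec, tvec, K, D);
        return 0;
    }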
3826 | | |
3827 | | /** @brief Distorts 2D points using the fisheye model.
3828 | | |
3829 | | @param undistorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is |
3830 | | the number of points in the view. |
3831 | | @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. |
3832 | | @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3833 | | @param alpha The skew coefficient. |
3834 | | @param distorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> . |
3835 | | |
3836 | | Note that the function assumes the camera intrinsic matrix of the undistorted points to be the identity.
3837 | | This means that if you want to distort image points, you have to multiply them by \f$K^{-1}\f$ first.
3838 | | */ |
3839 | | CV_EXPORTS_W void distortPoints(InputArray undistorted, OutputArray distorted, InputArray K, InputArray D, double alpha = 0); |
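A small sketch of the identity-intrinsics convention: the input points below are already normalized (as if multiplied by \f$K^{-1}\f$), so the output lands in pixel coordinates; all values are placeholders:

    #include <opencv2/calib3d.hpp>
    #include <vector>

    int main()
    {
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);   // placeholder intrinsics
        cv::Vec4d D(0.05, -0.01, 0.003, 0.0);
        // Points on the normalized image plane (identity camera matrix).
        std::vector<cv::Point2d> undist = { {0.04, -0.03}, {0.10, 0.08} };
        std::vector<cv::Point2d> dist_px;
        cv::fisheye::distortPoints(undist, dist_px, K, D);  // output is in pixels
        return 0;
    }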
3840 | | |
3841 | | /** @brief Undistorts 2D points using the fisheye model
3842 | | |
3843 | | @param distorted Array of object points, 1xN/Nx1 2-channel (or vector\<Point2f\> ), where N is the |
3844 | | number of points in the view. |
3845 | | @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. |
3846 | | @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3847 | | @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 |
3848 | | 1-channel or 1x1 3-channel |
3849 | | @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4) |
3850 | | @param criteria Termination criteria |
3851 | | @param undistorted Output array of image points, 1xN/Nx1 2-channel, or vector\<Point2f\> . |
3852 | | */ |
3853 | | CV_EXPORTS_W void undistortPoints(InputArray distorted, OutputArray undistorted, |
3854 | | InputArray K, InputArray D, InputArray R = noArray(), InputArray P = noArray(), |
3855 | | TermCriteria criteria = TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 10, 1e-8)); |
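With default R and P, this function is the (approximate) inverse of #fisheye::distortPoints; a round-trip sketch with placeholder values:

    #include <opencv2/calib3d.hpp>
    #include <vector>

    int main()
    {
        cv::Matx33d K(300, 0, 320, 0, 300, 240, 0, 0, 1);
        cv::Vec4d D(0.05, -0.01, 0.003, 0.0);
        std::vector<cv::Point2d> pix = { {350.0, 260.0} }, norm, back;
        // Empty R and P: the result is in normalized coordinates.
        cv::fisheye::undistortPoints(pix, norm, K, D);
        // distortPoints expects normalized input and returns pixels, so this
        // should approximately reproduce the original points.
        cv::fisheye::distortPoints(norm, back, K, D);
        return 0;
    }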
3856 | | |
3857 | | /** @brief Computes undistortion and rectification maps for the image transform performed by #remap. If D is empty,
3858 | | zero distortion is used; if R or P is empty, identity matrices are used.
3859 | | |
3860 | | @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. |
3861 | | @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3862 | | @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 |
3863 | | 1-channel or 1x1 3-channel |
3864 | | @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4) |
3865 | | @param size Undistorted image size. |
3866 | | @param m1type Type of the first output map that can be CV_32FC1 or CV_16SC2 . See #convertMaps |
3867 | | for details. |
3868 | | @param map1 The first output map. |
3869 | | @param map2 The second output map. |
3870 | | */ |
3871 | | CV_EXPORTS_W void initUndistortRectifyMap(InputArray K, InputArray D, InputArray R, InputArray P, |
3872 | | const cv::Size& size, int m1type, OutputArray map1, OutputArray map2); |
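A common pattern (a sketch, not taken from the OpenCV samples; the helper name below is hypothetical) is to build the maps once and reuse them for every frame:

    #include <opencv2/calib3d.hpp>
    #include <opencv2/imgproc.hpp>

    // Builds the undistortion maps once; afterwards call
    // cv::remap(src, dst, map1, map2, cv::INTER_LINEAR) per frame.
    void buildFisheyeMaps(const cv::Matx33d& K, const cv::Vec4d& D,
                          const cv::Size& size, cv::Mat& map1, cv::Mat& map2)
    {
        // Empty R selects the identity rectification; P = K keeps the input intrinsics.
        cv::fisheye::initUndistortRectifyMap(K, D, cv::Mat(), K,
                                             size, CV_16SC2, map1, map2);
    }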
3873 | | |
3874 | | /** @brief Transforms an image to compensate for fisheye lens distortion. |
3875 | | |
3876 | | @param distorted image with fisheye lens distortion. |
3877 | | @param undistorted Output image with compensated fisheye lens distortion. |
3878 | | @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. |
3879 | | @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3880 | | @param Knew Camera intrinsic matrix of the undistorted (output) image. By default, it is the identity matrix, but you
3881 | | may additionally scale and shift the result by using a different matrix.
3882 | | @param new_size the new size of the output image
3883 | | |
3884 | | The function transforms an image to compensate for radial and tangential lens distortion.
3885 | | |
3886 | | The function is simply a combination of #fisheye::initUndistortRectifyMap (with unity R ) and #remap |
3887 | | (with bilinear interpolation). See the former function for details of the transformation being |
3888 | | performed. |
3889 | | |
3890 | | See below for the results of undistortImage:
3891 | | - a\) result of #undistort with the perspective camera model (all possible distortion
3892 | | coefficients (k_1, k_2, k_3, k_4, k_5, k_6) were optimized during calibration)
3893 | | - b\) result of #fisheye::undistortImage with the fisheye camera model (all possible fisheye
3894 | | distortion coefficients (k_1, k_2, k_3, k_4) were optimized during calibration)
3895 | | - c\) original image captured with a fisheye lens
3896 | |
3897 | | Pictures a) and b) are almost the same. But if we consider points of the image located far from
3898 | | the center, we can notice that on image a) these points are still distorted.
3899 | | |
3900 | |  |
3901 | | */ |
3902 | | CV_EXPORTS_W void undistortImage(InputArray distorted, OutputArray undistorted, |
3903 | | InputArray K, InputArray D, InputArray Knew = cv::noArray(), const Size& new_size = Size()); |
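A one-call sketch (helper name hypothetical). Passing Knew = K keeps the output at a sensible scale; with the default identity Knew, the result is projected with unit focal length, which is rarely what you want:

    #include <opencv2/calib3d.hpp>

    void undistortFrame(const cv::Mat& frame, cv::Mat& out,
                        const cv::Matx33d& K, const cv::Vec4d& D)
    {
        // Reuse K as Knew so the undistorted image keeps the original scaling.
        cv::fisheye::undistortImage(frame, out, K, D, K);
    }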
3904 | | |
3905 | | /** @brief Estimates new camera intrinsic matrix for undistortion or rectification. |
3906 | | |
3907 | | @param K Camera intrinsic matrix \f$cameramatrix{K}\f$. |
3908 | | @param image_size Size of the image |
3909 | | @param D Input vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3910 | | @param R Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3 |
3911 | | 1-channel or 1x1 3-channel |
3912 | | @param P New camera intrinsic matrix (3x3) or new projection matrix (3x4) |
3913 | | @param balance Sets the new focal length in the range between the minimum and the maximum focal
3914 | | length. Balance is in the range [0, 1].
3915 | | @param new_size the new size |
3916 | | @param fov_scale Divisor for new focal length. |
3917 | | */ |
3918 | | CV_EXPORTS_W void estimateNewCameraMatrixForUndistortRectify(InputArray K, InputArray D, const Size &image_size, InputArray R, |
3919 | | OutputArray P, double balance = 0.0, const Size& new_size = Size(), double fov_scale = 1.0); |
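A sketch combining this function with #fisheye::undistortImage (helper name hypothetical); roughly, balance=0.0 favors an output containing only valid pixels, while balance=1.0 preserves more of the original field of view:

    #include <opencv2/calib3d.hpp>

    void undistortBalanced(const cv::Mat& frame, cv::Mat& out,
                           const cv::Matx33d& K, const cv::Vec4d& D,
                           double balance = 0.0)
    {
        cv::Mat Knew;
        // Identity R: no rectification, just a rescaled camera matrix.
        cv::fisheye::estimateNewCameraMatrixForUndistortRectify(
            K, D, frame.size(), cv::Mat::eye(3, 3, CV_64F), Knew, balance);
        cv::fisheye::undistortImage(frame, out, K, D, Knew, frame.size());
    }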
3920 | | |
3921 | | /** @brief Performs camera calibration |
3922 | | |
3923 | | @param objectPoints vector of vectors of calibration pattern points in the calibration pattern |
3924 | | coordinate space. |
3925 | | @param imagePoints vector of vectors of the projections of calibration pattern points. |
3926 | | imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to |
3927 | | objectPoints[i].size() for each i. |
3928 | | @param image_size Size of the image used only to initialize the camera intrinsic matrix. |
3929 | | @param K Output 3x3 floating-point camera intrinsic matrix |
3930 | | \f$\cameramatrix{A}\f$ . If |
3931 | | @ref fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be |
3932 | | initialized before calling the function. |
3933 | | @param D Output vector of distortion coefficients \f$\distcoeffsfisheye\f$. |
3934 | | @param rvecs Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view. |
3935 | | That is, each k-th rotation vector together with the corresponding k-th translation vector (see |
3936 | | the next output parameter description) brings the calibration pattern from the model coordinate |
3937 | | space (in which object points are specified) to the world coordinate space, that is, a real |
3938 | | position of the calibration pattern in the k-th pattern view (k=0.. *M* -1). |
3939 | | @param tvecs Output vector of translation vectors estimated for each pattern view. |
3940 | | @param flags Different flags that may be zero or a combination of the following values: |
3941 | | - @ref fisheye::CALIB_USE_INTRINSIC_GUESS K contains valid initial values of
3942 | | fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
3943 | | center ( image_size is used), and focal distances are computed in a least-squares fashion.
3944 | | - @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
3945 | | of intrinsic optimization.
3946 | | - @ref fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
3947 | | - @ref fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
3948 | | - @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients
3949 | | are set to zero and stay zero.
3950 | | - @ref fisheye::CALIB_FIX_PRINCIPAL_POINT The principal point is not changed during the global
3951 | | optimization. It stays at the center or at the location specified when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
3952 | | - @ref fisheye::CALIB_FIX_FOCAL_LENGTH The focal length is not changed during the global
3953 | | optimization. It is \f$\max(width,height)/\pi\f$ or the provided \f$f_x\f$, \f$f_y\f$ when @ref fisheye::CALIB_USE_INTRINSIC_GUESS is set too.
3954 | | @param criteria Termination criteria for the iterative optimization algorithm. |
3955 | | */ |
3956 | | CV_EXPORTS_W double calibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints, const Size& image_size, |
3957 | | InputOutputArray K, InputOutputArray D, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = 0, |
3958 | | TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); |
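A calibration skeleton (a sketch; collecting the pattern points, e.g. with #findChessboardCorners, is elided, and the helper name and flag choice are illustrative):

    #include <opencv2/calib3d.hpp>
    #include <vector>

    double calibrateFisheye(
        const std::vector<std::vector<cv::Point3f>>& objectPoints, // one set per view
        const std::vector<std::vector<cv::Point2f>>& imagePoints,  // detected corners
        const cv::Size& imageSize, cv::Matx33d& K, cv::Vec4d& D)
    {
        std::vector<cv::Mat> rvecs, tvecs;
        int flags = cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC |
                    cv::fisheye::CALIB_FIX_SKEW;
        // The return value is the final reprojection error.
        return cv::fisheye::calibrate(objectPoints, imagePoints, imageSize,
                                      K, D, rvecs, tvecs, flags);
    }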
3959 | | |
3960 | | /** @brief Stereo rectification for fisheye camera model |
3961 | | |
3962 | | @param K1 First camera intrinsic matrix. |
3963 | | @param D1 First camera distortion parameters. |
3964 | | @param K2 Second camera intrinsic matrix. |
3965 | | @param D2 Second camera distortion parameters. |
3966 | | @param imageSize Size of the image used for stereo calibration. |
3967 | | @param R Rotation matrix between the coordinate systems of the first and the second |
3968 | | cameras. |
3969 | | @param tvec Translation vector between coordinate systems of the cameras. |
3970 | | @param R1 Output 3x3 rectification transform (rotation matrix) for the first camera. |
3971 | | @param R2 Output 3x3 rectification transform (rotation matrix) for the second camera. |
3972 | | @param P1 Output 3x4 projection matrix in the new (rectified) coordinate systems for the first |
3973 | | camera. |
3974 | | @param P2 Output 3x4 projection matrix in the new (rectified) coordinate systems for the second |
3975 | | camera. |
3976 | | @param Q Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see #reprojectImageTo3D ). |
3977 | | @param flags Operation flags that may be zero or @ref fisheye::CALIB_ZERO_DISPARITY . If the flag is set, |
3978 | | the function makes the principal points of each camera have the same pixel coordinates in the |
3979 | | rectified views. If the flag is not set, the function may still shift the images in the
3980 | | horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the |
3981 | | useful image area. |
3982 | | @param newImageSize New image resolution after rectification. The same size should be passed to |
3983 | | #initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0) |
3984 | | is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
3985 | | preserve details in the original image, especially when there is a big radial distortion. |
3986 | | @param balance Sets the new focal length in the range between the minimum and the maximum focal
3987 | | length. Balance is in the range [0, 1].
3988 | | @param fov_scale Divisor for new focal length. |
3989 | | */ |
3990 | | CV_EXPORTS_W void stereoRectify(InputArray K1, InputArray D1, InputArray K2, InputArray D2, const Size &imageSize, InputArray R, InputArray tvec, |
3991 | | OutputArray R1, OutputArray R2, OutputArray P1, OutputArray P2, OutputArray Q, int flags, const Size &newImageSize = Size(), |
3992 | | double balance = 0.0, double fov_scale = 1.0); |
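A sketch wiring the outputs into #fisheye::initUndistortRectifyMap for the first camera (helper name hypothetical; K1, D1, R, and tvec would come from a prior #fisheye::stereoCalibrate):

    #include <opencv2/calib3d.hpp>

    void rectifyFirstCamera(const cv::Matx33d& K1, const cv::Vec4d& D1,
                            const cv::Matx33d& K2, const cv::Vec4d& D2,
                            const cv::Size& imageSize, const cv::Mat& R,
                            const cv::Vec3d& tvec, cv::Mat& map1, cv::Mat& map2)
    {
        cv::Mat R1, R2, P1, P2, Q;
        cv::fisheye::stereoRectify(K1, D1, K2, D2, imageSize, R, tvec,
                                   R1, R2, P1, P2, Q,
                                   cv::fisheye::CALIB_ZERO_DISPARITY);
        // R1/P1 feed directly into the per-camera rectification maps.
        cv::fisheye::initUndistortRectifyMap(K1, D1, R1, P1, imageSize,
                                             CV_16SC2, map1, map2);
    }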
3993 | | |
3994 | | /** @brief Performs stereo calibration |
3995 | | |
3996 | | @param objectPoints Vector of vectors of the calibration pattern points. |
3997 | | @param imagePoints1 Vector of vectors of the projections of the calibration pattern points, |
3998 | | observed by the first camera. |
3999 | | @param imagePoints2 Vector of vectors of the projections of the calibration pattern points, |
4000 | | observed by the second camera. |
4001 | | @param K1 Input/output first camera intrinsic matrix: |
4002 | | \f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If |
4003 | | any of @ref fisheye::CALIB_USE_INTRINSIC_GUESS , @ref fisheye::CALIB_FIX_INTRINSIC are specified, |
4004 | | some or all of the matrix components must be initialized. |
4005 | | @param D1 Input/output vector of distortion coefficients \f$\distcoeffsfisheye\f$ of 4 elements. |
4006 | | @param K2 Input/output second camera intrinsic matrix. The parameter is similar to K1 . |
4007 | | @param D2 Input/output lens distortion coefficients for the second camera. The parameter is |
4008 | | similar to D1 . |
4009 | | @param imageSize Size of the image used only to initialize camera intrinsic matrix. |
4010 | | @param R Output rotation matrix between the 1st and the 2nd camera coordinate systems. |
4011 | | @param T Output translation vector between the coordinate systems of the cameras. |
4012 | | @param rvecs Output vector of rotation vectors ( @ref Rodrigues ) estimated for each pattern view in the |
4013 | | coordinate system of the first camera of the stereo pair (e.g. std::vector<cv::Mat>). More in detail, each |
4014 | | i-th rotation vector together with the corresponding i-th translation vector (see the next output parameter |
4015 | | description) brings the calibration pattern from the object coordinate space (in which object points are |
4016 | | specified) to the camera coordinate space of the first camera of the stereo pair. In more technical terms, |
4017 | | the tuple of the i-th rotation and translation vector performs a change of basis from object coordinate space |
4018 | | to camera coordinate space of the first camera of the stereo pair. |
4019 | | @param tvecs Output vector of translation vectors estimated for each pattern view, see parameter description |
4020 | | of previous output parameter ( rvecs ). |
4021 | | @param flags Different flags that may be zero or a combination of the following values: |
4022 | | - @ref fisheye::CALIB_FIX_INTRINSIC Fix K1, K2 and D1, D2 so that only the R, T matrices
4023 | | are estimated.
4024 | | - @ref fisheye::CALIB_USE_INTRINSIC_GUESS K1, K2 contain valid initial values of
4025 | | fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
4026 | | center (imageSize is used), and focal distances are computed in a least-squares fashion.
4027 | | - @ref fisheye::CALIB_RECOMPUTE_EXTRINSIC Extrinsics will be recomputed after each iteration
4028 | | of intrinsic optimization.
4029 | | - @ref fisheye::CALIB_CHECK_COND The function will check the validity of the condition number.
4030 | | - @ref fisheye::CALIB_FIX_SKEW The skew coefficient (alpha) is set to zero and stays zero.
4031 | | - @ref fisheye::CALIB_FIX_K1,..., @ref fisheye::CALIB_FIX_K4 Selected distortion coefficients are set to zero and stay
4032 | | zero.
4033 | | @param criteria Termination criteria for the iterative optimization algorithm. |
4034 | | */ |
4035 | | CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
4036 | | InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, |
4037 | | OutputArray R, OutputArray T, OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs, int flags = fisheye::CALIB_FIX_INTRINSIC, |
4038 | | TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); |
4039 | | |
4040 | | /// @overload |
4041 | | CV_EXPORTS_W double stereoCalibrate(InputArrayOfArrays objectPoints, InputArrayOfArrays imagePoints1, InputArrayOfArrays imagePoints2, |
4042 | | InputOutputArray K1, InputOutputArray D1, InputOutputArray K2, InputOutputArray D2, Size imageSize, |
4043 | | OutputArray R, OutputArray T, int flags = fisheye::CALIB_FIX_INTRINSIC, |
4044 | | TermCriteria criteria = TermCriteria(TermCriteria::COUNT + TermCriteria::EPS, 100, DBL_EPSILON)); |
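A stereo-calibration skeleton using the shorter overload (a sketch; pattern detection is elided, and the helper name and flags are illustrative):

    #include <opencv2/calib3d.hpp>
    #include <vector>

    double stereoCalibrateFisheye(
        const std::vector<std::vector<cv::Point3d>>& objectPoints,
        const std::vector<std::vector<cv::Point2d>>& imagePoints1,
        const std::vector<std::vector<cv::Point2d>>& imagePoints2,
        const cv::Size& imageSize,
        cv::Matx33d& K1, cv::Vec4d& D1, cv::Matx33d& K2, cv::Vec4d& D2,
        cv::Mat& R, cv::Mat& T)
    {
        // Omitting CALIB_FIX_INTRINSIC (the default flag) means K1/D1/K2/D2
        // are refined alongside R and T.
        int flags = cv::fisheye::CALIB_RECOMPUTE_EXTRINSIC |
                    cv::fisheye::CALIB_FIX_SKEW;
        return cv::fisheye::stereoCalibrate(objectPoints, imagePoints1, imagePoints2,
                                            K1, D1, K2, D2, imageSize, R, T, flags);
    }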
4045 | | |
4046 | | //! @} calib3d_fisheye |
4047 | | } // end namespace fisheye |
4048 | | |
4049 | | } //end namespace cv |
4050 | | |
4051 | | #if 0 //def __cplusplus |
4052 | | ////////////////////////////////////////////////////////////////////////////////////////// |
4053 | | class CV_EXPORTS CvLevMarq |
4054 | | { |
4055 | | public: |
4056 | | CvLevMarq(); |
4057 | | CvLevMarq( int nparams, int nerrs, CvTermCriteria criteria= |
4058 | | cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), |
4059 | | bool completeSymmFlag=false ); |
4060 | | ~CvLevMarq(); |
4061 | | void init( int nparams, int nerrs, CvTermCriteria criteria= |
4062 | | cvTermCriteria(CV_TERMCRIT_EPS+CV_TERMCRIT_ITER,30,DBL_EPSILON), |
4063 | | bool completeSymmFlag=false ); |
4064 | | bool update( const CvMat*& param, CvMat*& J, CvMat*& err ); |
4065 | | bool updateAlt( const CvMat*& param, CvMat*& JtJ, CvMat*& JtErr, double*& errNorm ); |
4066 | | |
4067 | | void clear(); |
4068 | | void step(); |
4069 | | enum { DONE=0, STARTED=1, CALC_J=2, CHECK_ERR=3 }; |
4070 | | |
4071 | | cv::Ptr<CvMat> mask; |
4072 | | cv::Ptr<CvMat> prevParam; |
4073 | | cv::Ptr<CvMat> param; |
4074 | | cv::Ptr<CvMat> J; |
4075 | | cv::Ptr<CvMat> err; |
4076 | | cv::Ptr<CvMat> JtJ; |
4077 | | cv::Ptr<CvMat> JtJN; |
4078 | | cv::Ptr<CvMat> JtErr; |
4079 | | cv::Ptr<CvMat> JtJV; |
4080 | | cv::Ptr<CvMat> JtJW; |
4081 | | double prevErrNorm, errNorm; |
4082 | | int lambdaLg10; |
4083 | | CvTermCriteria criteria; |
4084 | | int state; |
4085 | | int iters; |
4086 | | bool completeSymmFlag; |
4087 | | int solveMethod; |
4088 | | }; |
4089 | | #endif |
4090 | | |
4091 | | #endif |