public class opencv_calib3d extends org.bytedeco.javacpp.helper.opencv_calib3d
| Modifier and Type | Class and Description |
|---|---|
static class |
opencv_calib3d.CirclesGridFinderParameters |
static class |
opencv_calib3d.CirclesGridFinderParameters2 |
static class |
opencv_calib3d.CvLevMarq |
static class |
opencv_calib3d.CvPOSITObject |
static class |
opencv_calib3d.CvStereoBMState |
static class |
opencv_calib3d.StereoBM
\brief Class for computing stereo correspondence using the block matching algorithm, introduced and
contributed to OpenCV by K. Konolige.
|
static class |
opencv_calib3d.StereoMatcher
\brief The base class for stereo correspondence algorithms.
|
static class |
opencv_calib3d.StereoSGBM
\brief The class implements the modified H. Hirschmuller algorithm.
|
opencv_calib3d.AbstractCvPOSITObject, opencv_calib3d.AbstractCvStereoBMState| Modifier and Type | Field and Description |
|---|---|
static int |
CALIB_CB_ADAPTIVE_THRESH
enum cv::
|
static int |
CALIB_CB_ASYMMETRIC_GRID
enum cv::
|
static int |
CALIB_CB_CLUSTERING
enum cv::
|
static int |
CALIB_CB_FAST_CHECK
enum cv::
|
static int |
CALIB_CB_FILTER_QUADS
enum cv::
|
static int |
CALIB_CB_NORMALIZE_IMAGE
enum cv::
|
static int |
CALIB_CB_SYMMETRIC_GRID
enum cv::
|
static int |
CALIB_FIX_ASPECT_RATIO
enum cv::
|
static int |
CALIB_FIX_FOCAL_LENGTH
enum cv::
|
static int |
CALIB_FIX_INTRINSIC
enum cv::
|
static int |
CALIB_FIX_K1
enum cv::
|
static int |
CALIB_FIX_K2
enum cv::
|
static int |
CALIB_FIX_K3
enum cv::
|
static int |
CALIB_FIX_K4
enum cv::
|
static int |
CALIB_FIX_K5
enum cv::
|
static int |
CALIB_FIX_K6
enum cv::
|
static int |
CALIB_FIX_PRINCIPAL_POINT
enum cv::
|
static int |
CALIB_FIX_S1_S2_S3_S4
enum cv::
|
static int |
CALIB_FIX_TANGENT_DIST
enum cv::
|
static int |
CALIB_FIX_TAUX_TAUY
enum cv::
|
static int |
CALIB_RATIONAL_MODEL
enum cv::
|
static int |
CALIB_SAME_FOCAL_LENGTH
enum cv::
|
static int |
CALIB_THIN_PRISM_MODEL
enum cv::
|
static int |
CALIB_TILTED_MODEL
enum cv::
|
static int |
CALIB_USE_EXTRINSIC_GUESS
enum cv::
|
static int |
CALIB_USE_INTRINSIC_GUESS
enum cv::
|
static int |
CALIB_USE_LU
enum cv::
|
static int |
CALIB_USE_QR
enum cv::
|
static int |
CALIB_ZERO_DISPARITY
enum cv::
|
static int |
CALIB_ZERO_TANGENT_DIST
enum cv::
|
static int |
CV_CALIB_CB_ADAPTIVE_THRESH |
static int |
CV_CALIB_CB_FAST_CHECK |
static int |
CV_CALIB_CB_FILTER_QUADS |
static int |
CV_CALIB_CB_NORMALIZE_IMAGE |
static int |
CV_CALIB_FIX_ASPECT_RATIO |
static int |
CV_CALIB_FIX_FOCAL_LENGTH |
static int |
CV_CALIB_FIX_INTRINSIC |
static int |
CV_CALIB_FIX_K1 |
static int |
CV_CALIB_FIX_K2 |
static int |
CV_CALIB_FIX_K3 |
static int |
CV_CALIB_FIX_K4 |
static int |
CV_CALIB_FIX_K5 |
static int |
CV_CALIB_FIX_K6 |
static int |
CV_CALIB_FIX_PRINCIPAL_POINT |
static int |
CV_CALIB_FIX_S1_S2_S3_S4 |
static int |
CV_CALIB_FIX_TANGENT_DIST |
static int |
CV_CALIB_FIX_TAUX_TAUY |
static int |
CV_CALIB_NINTRINSIC |
static int |
CV_CALIB_RATIONAL_MODEL |
static int |
CV_CALIB_SAME_FOCAL_LENGTH |
static int |
CV_CALIB_THIN_PRISM_MODEL |
static int |
CV_CALIB_TILTED_MODEL |
static int |
CV_CALIB_USE_INTRINSIC_GUESS |
static int |
CV_CALIB_ZERO_DISPARITY |
static int |
CV_CALIB_ZERO_TANGENT_DIST |
static int |
CV_DLS
enum
|
static int |
CV_EPNP
enum
|
static int |
CV_FM_7POINT |
static int |
CV_FM_8POINT |
static int |
CV_FM_LMEDS |
static int |
CV_FM_LMEDS_ONLY |
static int |
CV_FM_RANSAC |
static int |
CV_FM_RANSAC_ONLY |
static int |
CV_ITERATIVE
enum
|
static int |
CV_LMEDS |
static int |
CV_P3P
enum
|
static int |
CV_RANSAC |
static int |
CV_STEREO_BM_BASIC |
static int |
CV_STEREO_BM_FISH_EYE |
static int |
CV_STEREO_BM_NARROW |
static int |
CV_STEREO_BM_NORMALIZED_RESPONSE |
static int |
CV_STEREO_BM_XSOBEL |
static int |
FISHEYE_CALIB_CHECK_COND
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_INTRINSIC
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_K1
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_K2
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_K3
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_K4
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_PRINCIPAL_POINT
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_FIX_SKEW
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_RECOMPUTE_EXTRINSIC
enum cv::fisheye::
|
static int |
FISHEYE_CALIB_USE_INTRINSIC_GUESS
enum cv::fisheye::
|
static int |
FM_7POINT
enum cv::
|
static int |
FM_8POINT
enum cv::
|
static int |
FM_LMEDS
enum cv::
|
static int |
FM_RANSAC
enum cv::
|
static int |
LMEDS
enum cv::
|
static int |
RANSAC
enum cv::
|
static int |
RHO
enum cv::
|
static int |
SOLVEPNP_AP3P
enum cv::
|
static int |
SOLVEPNP_DLS
enum cv::
|
static int |
SOLVEPNP_EPNP
enum cv::
|
static int |
SOLVEPNP_ITERATIVE
enum cv::
|
static int |
SOLVEPNP_MAX_COUNT
enum cv::
|
static int |
SOLVEPNP_P3P
enum cv::
|
static int |
SOLVEPNP_UPNP
enum cv::
|
| Constructor and Description |
|---|
opencv_calib3d() |
public static final int CV_FM_7POINT
public static final int CV_FM_8POINT
public static final int CV_LMEDS
public static final int CV_RANSAC
public static final int CV_FM_LMEDS_ONLY
public static final int CV_FM_RANSAC_ONLY
public static final int CV_FM_LMEDS
public static final int CV_FM_RANSAC
public static final int CV_ITERATIVE
public static final int CV_EPNP
public static final int CV_P3P
public static final int CV_DLS
public static final int CV_CALIB_CB_ADAPTIVE_THRESH
public static final int CV_CALIB_CB_NORMALIZE_IMAGE
public static final int CV_CALIB_CB_FILTER_QUADS
public static final int CV_CALIB_CB_FAST_CHECK
public static final int CV_CALIB_USE_INTRINSIC_GUESS
public static final int CV_CALIB_FIX_ASPECT_RATIO
public static final int CV_CALIB_FIX_PRINCIPAL_POINT
public static final int CV_CALIB_ZERO_TANGENT_DIST
public static final int CV_CALIB_FIX_FOCAL_LENGTH
public static final int CV_CALIB_FIX_K1
public static final int CV_CALIB_FIX_K2
public static final int CV_CALIB_FIX_K3
public static final int CV_CALIB_FIX_K4
public static final int CV_CALIB_FIX_K5
public static final int CV_CALIB_FIX_K6
public static final int CV_CALIB_RATIONAL_MODEL
public static final int CV_CALIB_THIN_PRISM_MODEL
public static final int CV_CALIB_FIX_S1_S2_S3_S4
public static final int CV_CALIB_TILTED_MODEL
public static final int CV_CALIB_FIX_TAUX_TAUY
public static final int CV_CALIB_FIX_TANGENT_DIST
public static final int CV_CALIB_NINTRINSIC
public static final int CV_CALIB_FIX_INTRINSIC
public static final int CV_CALIB_SAME_FOCAL_LENGTH
public static final int CV_CALIB_ZERO_DISPARITY
public static final int CV_STEREO_BM_NORMALIZED_RESPONSE
public static final int CV_STEREO_BM_XSOBEL
public static final int CV_STEREO_BM_BASIC
public static final int CV_STEREO_BM_FISH_EYE
public static final int CV_STEREO_BM_NARROW
public static final int LMEDS
public static final int RANSAC
public static final int RHO
public static final int SOLVEPNP_ITERATIVE
public static final int SOLVEPNP_EPNP
public static final int SOLVEPNP_P3P
public static final int SOLVEPNP_DLS
public static final int SOLVEPNP_UPNP
public static final int SOLVEPNP_AP3P
public static final int SOLVEPNP_MAX_COUNT
public static final int CALIB_CB_ADAPTIVE_THRESH
public static final int CALIB_CB_NORMALIZE_IMAGE
public static final int CALIB_CB_FILTER_QUADS
public static final int CALIB_CB_FAST_CHECK
public static final int CALIB_CB_SYMMETRIC_GRID
public static final int CALIB_CB_ASYMMETRIC_GRID
public static final int CALIB_CB_CLUSTERING
public static final int CALIB_USE_INTRINSIC_GUESS
public static final int CALIB_FIX_ASPECT_RATIO
public static final int CALIB_FIX_PRINCIPAL_POINT
public static final int CALIB_ZERO_TANGENT_DIST
public static final int CALIB_FIX_FOCAL_LENGTH
public static final int CALIB_FIX_K1
public static final int CALIB_FIX_K2
public static final int CALIB_FIX_K3
public static final int CALIB_FIX_K4
public static final int CALIB_FIX_K5
public static final int CALIB_FIX_K6
public static final int CALIB_RATIONAL_MODEL
public static final int CALIB_THIN_PRISM_MODEL
public static final int CALIB_FIX_S1_S2_S3_S4
public static final int CALIB_TILTED_MODEL
public static final int CALIB_FIX_TAUX_TAUY
public static final int CALIB_USE_QR
public static final int CALIB_FIX_TANGENT_DIST
public static final int CALIB_FIX_INTRINSIC
public static final int CALIB_SAME_FOCAL_LENGTH
public static final int CALIB_ZERO_DISPARITY
public static final int CALIB_USE_LU
public static final int CALIB_USE_EXTRINSIC_GUESS
public static final int FM_7POINT
public static final int FM_8POINT
public static final int FM_LMEDS
public static final int FM_RANSAC
public static final int FISHEYE_CALIB_USE_INTRINSIC_GUESS
public static final int FISHEYE_CALIB_RECOMPUTE_EXTRINSIC
public static final int FISHEYE_CALIB_CHECK_COND
public static final int FISHEYE_CALIB_FIX_SKEW
public static final int FISHEYE_CALIB_FIX_K1
public static final int FISHEYE_CALIB_FIX_K2
public static final int FISHEYE_CALIB_FIX_K3
public static final int FISHEYE_CALIB_FIX_K4
public static final int FISHEYE_CALIB_FIX_INTRINSIC
public static final int FISHEYE_CALIB_FIX_PRINCIPAL_POINT
public static opencv_calib3d.CvPOSITObject cvCreatePOSITObject(opencv_core.CvPoint3D32f points, int point_count)
public static opencv_calib3d.CvPOSITObject cvCreatePOSITObject(@Cast(value="CvPoint3D32f*") FloatBuffer points, int point_count)
public static opencv_calib3d.CvPOSITObject cvCreatePOSITObject(@Cast(value="CvPoint3D32f*") float[] points, int point_count)
public static void cvPOSIT(opencv_calib3d.CvPOSITObject posit_object, opencv_core.CvPoint2D32f image_points, double focal_length, @ByVal opencv_core.CvTermCriteria criteria, org.bytedeco.javacpp.FloatPointer rotation_matrix, org.bytedeco.javacpp.FloatPointer translation_vector)
public static void cvPOSIT(opencv_calib3d.CvPOSITObject posit_object, @Cast(value="CvPoint2D32f*") FloatBuffer image_points, double focal_length, @ByVal opencv_core.CvTermCriteria criteria, FloatBuffer rotation_matrix, FloatBuffer translation_vector)
public static void cvPOSIT(opencv_calib3d.CvPOSITObject posit_object, @Cast(value="CvPoint2D32f*") float[] image_points, double focal_length, @ByVal opencv_core.CvTermCriteria criteria, float[] rotation_matrix, float[] translation_vector)
public static void cvReleasePOSITObject(@Cast(value="CvPOSITObject**")
org.bytedeco.javacpp.PointerPointer posit_object)
public static void cvReleasePOSITObject(@ByPtrPtr
opencv_calib3d.CvPOSITObject posit_object)
public static int cvRANSACUpdateNumIters(double p,
double err_prob,
int model_points,
int max_iters)
public static void cvConvertPointsHomogeneous(@Const
opencv_core.CvMat src,
opencv_core.CvMat dst)
public static int cvFindFundamentalMat(@Const
opencv_core.CvMat points1,
@Const
opencv_core.CvMat points2,
opencv_core.CvMat fundamental_matrix,
int method,
double param1,
double param2,
opencv_core.CvMat status)
public static int cvFindFundamentalMat(@Const
opencv_core.CvMat points1,
@Const
opencv_core.CvMat points2,
opencv_core.CvMat fundamental_matrix)
public static void cvComputeCorrespondEpilines(@Const
opencv_core.CvMat points,
int which_image,
@Const
opencv_core.CvMat fundamental_matrix,
opencv_core.CvMat correspondent_lines)
public static void cvTriangulatePoints(opencv_core.CvMat projMatr1, opencv_core.CvMat projMatr2, opencv_core.CvMat projPoints1, opencv_core.CvMat projPoints2, opencv_core.CvMat points4D)
public static void cvCorrectMatches(opencv_core.CvMat F, opencv_core.CvMat points1, opencv_core.CvMat points2, opencv_core.CvMat new_points1, opencv_core.CvMat new_points2)
public static void cvGetOptimalNewCameraMatrix(@Const
opencv_core.CvMat camera_matrix,
@Const
opencv_core.CvMat dist_coeffs,
@ByVal
opencv_core.CvSize image_size,
double alpha,
opencv_core.CvMat new_camera_matrix,
@ByVal(nullValue="CvSize(cvSize(0,0))")
opencv_core.CvSize new_imag_size,
opencv_core.CvRect valid_pixel_ROI,
int center_principal_point)
public static void cvGetOptimalNewCameraMatrix(@Const
opencv_core.CvMat camera_matrix,
@Const
opencv_core.CvMat dist_coeffs,
@ByVal
opencv_core.CvSize image_size,
double alpha,
opencv_core.CvMat new_camera_matrix)
public static int cvRodrigues2(@Const
opencv_core.CvMat src,
opencv_core.CvMat dst,
opencv_core.CvMat jacobian)
public static int cvRodrigues2(@Const
opencv_core.CvMat src,
opencv_core.CvMat dst)
public static int cvFindHomography(@Const
opencv_core.CvMat src_points,
@Const
opencv_core.CvMat dst_points,
opencv_core.CvMat homography,
int method,
double ransacReprojThreshold,
opencv_core.CvMat mask,
int maxIters,
double confidence)
public static int cvFindHomography(@Const
opencv_core.CvMat src_points,
@Const
opencv_core.CvMat dst_points,
opencv_core.CvMat homography)
public static void cvRQDecomp3x3(@Const
opencv_core.CvMat matrixM,
opencv_core.CvMat matrixR,
opencv_core.CvMat matrixQ,
opencv_core.CvMat matrixQx,
opencv_core.CvMat matrixQy,
opencv_core.CvMat matrixQz,
opencv_core.CvPoint3D64f eulerAngles)
public static void cvRQDecomp3x3(@Const
opencv_core.CvMat matrixM,
opencv_core.CvMat matrixR,
opencv_core.CvMat matrixQ)
public static void cvRQDecomp3x3(@Const
opencv_core.CvMat matrixM,
opencv_core.CvMat matrixR,
opencv_core.CvMat matrixQ,
opencv_core.CvMat matrixQx,
opencv_core.CvMat matrixQy,
opencv_core.CvMat matrixQz,
@Cast(value="CvPoint3D64f*")
DoubleBuffer eulerAngles)
public static void cvRQDecomp3x3(@Const
opencv_core.CvMat matrixM,
opencv_core.CvMat matrixR,
opencv_core.CvMat matrixQ,
opencv_core.CvMat matrixQx,
opencv_core.CvMat matrixQy,
opencv_core.CvMat matrixQz,
@Cast(value="CvPoint3D64f*")
double[] eulerAngles)
public static void cvDecomposeProjectionMatrix(@Const
opencv_core.CvMat projMatr,
opencv_core.CvMat calibMatr,
opencv_core.CvMat rotMatr,
opencv_core.CvMat posVect,
opencv_core.CvMat rotMatrX,
opencv_core.CvMat rotMatrY,
opencv_core.CvMat rotMatrZ,
opencv_core.CvPoint3D64f eulerAngles)
public static void cvDecomposeProjectionMatrix(@Const
opencv_core.CvMat projMatr,
opencv_core.CvMat calibMatr,
opencv_core.CvMat rotMatr,
opencv_core.CvMat posVect)
public static void cvDecomposeProjectionMatrix(@Const
opencv_core.CvMat projMatr,
opencv_core.CvMat calibMatr,
opencv_core.CvMat rotMatr,
opencv_core.CvMat posVect,
opencv_core.CvMat rotMatrX,
opencv_core.CvMat rotMatrY,
opencv_core.CvMat rotMatrZ,
@Cast(value="CvPoint3D64f*")
DoubleBuffer eulerAngles)
public static void cvDecomposeProjectionMatrix(@Const
opencv_core.CvMat projMatr,
opencv_core.CvMat calibMatr,
opencv_core.CvMat rotMatr,
opencv_core.CvMat posVect,
opencv_core.CvMat rotMatrX,
opencv_core.CvMat rotMatrY,
opencv_core.CvMat rotMatrZ,
@Cast(value="CvPoint3D64f*")
double[] eulerAngles)
public static void cvCalcMatMulDeriv(@Const
opencv_core.CvMat A,
@Const
opencv_core.CvMat B,
opencv_core.CvMat dABdA,
opencv_core.CvMat dABdB)
public static void cvComposeRT(@Const
opencv_core.CvMat _rvec1,
@Const
opencv_core.CvMat _tvec1,
@Const
opencv_core.CvMat _rvec2,
@Const
opencv_core.CvMat _tvec2,
opencv_core.CvMat _rvec3,
opencv_core.CvMat _tvec3,
opencv_core.CvMat dr3dr1,
opencv_core.CvMat dr3dt1,
opencv_core.CvMat dr3dr2,
opencv_core.CvMat dr3dt2,
opencv_core.CvMat dt3dr1,
opencv_core.CvMat dt3dt1,
opencv_core.CvMat dt3dr2,
opencv_core.CvMat dt3dt2)
public static void cvComposeRT(@Const
opencv_core.CvMat _rvec1,
@Const
opencv_core.CvMat _tvec1,
@Const
opencv_core.CvMat _rvec2,
@Const
opencv_core.CvMat _tvec2,
opencv_core.CvMat _rvec3,
opencv_core.CvMat _tvec3)
public static void cvProjectPoints2(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat rotation_vector,
@Const
opencv_core.CvMat translation_vector,
@Const
opencv_core.CvMat camera_matrix,
@Const
opencv_core.CvMat distortion_coeffs,
opencv_core.CvMat image_points,
opencv_core.CvMat dpdrot,
opencv_core.CvMat dpdt,
opencv_core.CvMat dpdf,
opencv_core.CvMat dpdc,
opencv_core.CvMat dpddist,
double aspect_ratio)
public static void cvProjectPoints2(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat rotation_vector,
@Const
opencv_core.CvMat translation_vector,
@Const
opencv_core.CvMat camera_matrix,
@Const
opencv_core.CvMat distortion_coeffs,
opencv_core.CvMat image_points)
public static void cvFindExtrinsicCameraParams2(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points,
@Const
opencv_core.CvMat camera_matrix,
@Const
opencv_core.CvMat distortion_coeffs,
opencv_core.CvMat rotation_vector,
opencv_core.CvMat translation_vector,
int use_extrinsic_guess)
public static void cvFindExtrinsicCameraParams2(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points,
@Const
opencv_core.CvMat camera_matrix,
@Const
opencv_core.CvMat distortion_coeffs,
opencv_core.CvMat rotation_vector,
opencv_core.CvMat translation_vector)
public static void cvInitIntrinsicParams2D(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points,
@Const
opencv_core.CvMat npoints,
@ByVal
opencv_core.CvSize image_size,
opencv_core.CvMat camera_matrix,
double aspect_ratio)
public static void cvInitIntrinsicParams2D(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points,
@Const
opencv_core.CvMat npoints,
@ByVal
opencv_core.CvSize image_size,
opencv_core.CvMat camera_matrix)
public static int cvCheckChessboard(opencv_core.IplImage src, @ByVal opencv_core.CvSize size)
public static int cvFindChessboardCorners(@Const
org.bytedeco.javacpp.Pointer image,
@ByVal
opencv_core.CvSize pattern_size,
opencv_core.CvPoint2D32f corners,
org.bytedeco.javacpp.IntPointer corner_count,
int flags)
public static int cvFindChessboardCorners(@Const
org.bytedeco.javacpp.Pointer image,
@ByVal
opencv_core.CvSize pattern_size,
opencv_core.CvPoint2D32f corners)
public static int cvFindChessboardCorners(@Const
org.bytedeco.javacpp.Pointer image,
@ByVal
opencv_core.CvSize pattern_size,
@Cast(value="CvPoint2D32f*")
FloatBuffer corners,
IntBuffer corner_count,
int flags)
public static int cvFindChessboardCorners(@Const
org.bytedeco.javacpp.Pointer image,
@ByVal
opencv_core.CvSize pattern_size,
@Cast(value="CvPoint2D32f*")
FloatBuffer corners)
public static int cvFindChessboardCorners(@Const
org.bytedeco.javacpp.Pointer image,
@ByVal
opencv_core.CvSize pattern_size,
@Cast(value="CvPoint2D32f*")
float[] corners,
int[] corner_count,
int flags)
public static int cvFindChessboardCorners(@Const
org.bytedeco.javacpp.Pointer image,
@ByVal
opencv_core.CvSize pattern_size,
@Cast(value="CvPoint2D32f*")
float[] corners)
public static void cvDrawChessboardCorners(opencv_core.CvArr image, @ByVal opencv_core.CvSize pattern_size, opencv_core.CvPoint2D32f corners, int count, int pattern_was_found)
public static void cvDrawChessboardCorners(opencv_core.CvArr image, @ByVal opencv_core.CvSize pattern_size, @Cast(value="CvPoint2D32f*") FloatBuffer corners, int count, int pattern_was_found)
public static void cvDrawChessboardCorners(opencv_core.CvArr image, @ByVal opencv_core.CvSize pattern_size, @Cast(value="CvPoint2D32f*") float[] corners, int count, int pattern_was_found)
public static double cvCalibrateCamera2(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points,
@Const
opencv_core.CvMat point_counts,
@ByVal
opencv_core.CvSize image_size,
opencv_core.CvMat camera_matrix,
opencv_core.CvMat distortion_coeffs,
opencv_core.CvMat rotation_vectors,
opencv_core.CvMat translation_vectors,
int flags,
@ByVal(nullValue="CvTermCriteria(cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,DBL_EPSILON))")
opencv_core.CvTermCriteria term_crit)
public static double cvCalibrateCamera2(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points,
@Const
opencv_core.CvMat point_counts,
@ByVal
opencv_core.CvSize image_size,
opencv_core.CvMat camera_matrix,
opencv_core.CvMat distortion_coeffs)
public static void cvCalibrationMatrixValues(@Const
opencv_core.CvMat camera_matrix,
@ByVal
opencv_core.CvSize image_size,
double aperture_width,
double aperture_height,
org.bytedeco.javacpp.DoublePointer fovx,
org.bytedeco.javacpp.DoublePointer fovy,
org.bytedeco.javacpp.DoublePointer focal_length,
opencv_core.CvPoint2D64f principal_point,
org.bytedeco.javacpp.DoublePointer pixel_aspect_ratio)
public static void cvCalibrationMatrixValues(@Const
opencv_core.CvMat camera_matrix,
@ByVal
opencv_core.CvSize image_size)
public static void cvCalibrationMatrixValues(@Const
opencv_core.CvMat camera_matrix,
@ByVal
opencv_core.CvSize image_size,
double aperture_width,
double aperture_height,
DoubleBuffer fovx,
DoubleBuffer fovy,
DoubleBuffer focal_length,
@Cast(value="CvPoint2D64f*")
DoubleBuffer principal_point,
DoubleBuffer pixel_aspect_ratio)
public static void cvCalibrationMatrixValues(@Const
opencv_core.CvMat camera_matrix,
@ByVal
opencv_core.CvSize image_size,
double aperture_width,
double aperture_height,
double[] fovx,
double[] fovy,
double[] focal_length,
@Cast(value="CvPoint2D64f*")
double[] principal_point,
double[] pixel_aspect_ratio)
public static double cvStereoCalibrate(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points1,
@Const
opencv_core.CvMat image_points2,
@Const
opencv_core.CvMat npoints,
opencv_core.CvMat camera_matrix1,
opencv_core.CvMat dist_coeffs1,
opencv_core.CvMat camera_matrix2,
opencv_core.CvMat dist_coeffs2,
@ByVal
opencv_core.CvSize image_size,
opencv_core.CvMat R,
opencv_core.CvMat T,
opencv_core.CvMat E,
opencv_core.CvMat F,
int flags,
@ByVal(nullValue="CvTermCriteria(cvTermCriteria(CV_TERMCRIT_ITER+CV_TERMCRIT_EPS,30,1e-6))")
opencv_core.CvTermCriteria term_crit)
public static double cvStereoCalibrate(@Const
opencv_core.CvMat object_points,
@Const
opencv_core.CvMat image_points1,
@Const
opencv_core.CvMat image_points2,
@Const
opencv_core.CvMat npoints,
opencv_core.CvMat camera_matrix1,
opencv_core.CvMat dist_coeffs1,
opencv_core.CvMat camera_matrix2,
opencv_core.CvMat dist_coeffs2,
@ByVal
opencv_core.CvSize image_size,
opencv_core.CvMat R,
opencv_core.CvMat T)
public static void cvStereoRectify(@Const
opencv_core.CvMat camera_matrix1,
@Const
opencv_core.CvMat camera_matrix2,
@Const
opencv_core.CvMat dist_coeffs1,
@Const
opencv_core.CvMat dist_coeffs2,
@ByVal
opencv_core.CvSize image_size,
@Const
opencv_core.CvMat R,
@Const
opencv_core.CvMat T,
opencv_core.CvMat R1,
opencv_core.CvMat R2,
opencv_core.CvMat P1,
opencv_core.CvMat P2,
opencv_core.CvMat Q,
int flags,
double alpha,
@ByVal(nullValue="CvSize(cvSize(0,0))")
opencv_core.CvSize new_image_size,
opencv_core.CvRect valid_pix_ROI1,
opencv_core.CvRect valid_pix_ROI2)
public static void cvStereoRectify(@Const
opencv_core.CvMat camera_matrix1,
@Const
opencv_core.CvMat camera_matrix2,
@Const
opencv_core.CvMat dist_coeffs1,
@Const
opencv_core.CvMat dist_coeffs2,
@ByVal
opencv_core.CvSize image_size,
@Const
opencv_core.CvMat R,
@Const
opencv_core.CvMat T,
opencv_core.CvMat R1,
opencv_core.CvMat R2,
opencv_core.CvMat P1,
opencv_core.CvMat P2)
public static int cvStereoRectifyUncalibrated(@Const
opencv_core.CvMat points1,
@Const
opencv_core.CvMat points2,
@Const
opencv_core.CvMat F,
@ByVal
opencv_core.CvSize img_size,
opencv_core.CvMat H1,
opencv_core.CvMat H2,
double threshold)
public static int cvStereoRectifyUncalibrated(@Const
opencv_core.CvMat points1,
@Const
opencv_core.CvMat points2,
@Const
opencv_core.CvMat F,
@ByVal
opencv_core.CvSize img_size,
opencv_core.CvMat H1,
opencv_core.CvMat H2)
public static opencv_calib3d.CvStereoBMState cvCreateStereoBMState(int preset, int numberOfDisparities)
public static opencv_calib3d.CvStereoBMState cvCreateStereoBMState()
public static void cvReleaseStereoBMState(@Cast(value="CvStereoBMState**")
org.bytedeco.javacpp.PointerPointer state)
public static void cvReleaseStereoBMState(@ByPtrPtr
opencv_calib3d.CvStereoBMState state)
public static void cvFindStereoCorrespondenceBM(@Const
opencv_core.CvArr left,
@Const
opencv_core.CvArr right,
opencv_core.CvArr disparity,
opencv_calib3d.CvStereoBMState state)
@ByVal public static opencv_core.CvRect cvGetValidDisparityROI(@ByVal opencv_core.CvRect roi1, @ByVal opencv_core.CvRect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize)
public static void cvValidateDisparity(opencv_core.CvArr disparity, @Const opencv_core.CvArr cost, int minDisparity, int numberOfDisparities, int disp12MaxDiff)
public static void cvValidateDisparity(opencv_core.CvArr disparity, @Const opencv_core.CvArr cost, int minDisparity, int numberOfDisparities)
public static void cvReprojectImageTo3D(@Const
opencv_core.CvArr disparityImage,
opencv_core.CvArr _3dImage,
@Const
opencv_core.CvMat Q,
int handleMissingValues)
public static void cvReprojectImageTo3D(@Const
opencv_core.CvArr disparityImage,
opencv_core.CvArr _3dImage,
@Const
opencv_core.CvMat Q)
@Namespace(value="cv")
public static void Rodrigues(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat jacobian)
src - Input rotation vector (3x1 or 1x3) or rotation matrix (3x3).dst - Output rotation matrix (3x3) or rotation vector (3x1 or 1x3), respectively.jacobian - Optional output Jacobian matrix, 3x9 or 9x3, which is a matrix of partial
derivatives of the output array components with respect to the input array components.
\f[\begin{array}{l} \theta \leftarrow norm(r) \\ r \leftarrow r/ \theta \\ R = \cos{\theta} I + (1- \cos{\theta} ) r r^T + \sin{\theta} \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} \end{array}\f]
Inverse transformation can be also done easily, since
\f[\sin ( \theta ) \vecthreethree{0}{-r_z}{r_y}{r_z}{0}{-r_x}{-r_y}{r_x}{0} = \frac{R - R^T}{2}\f]
A rotation vector is a convenient and most compact representation of a rotation matrix (since any rotation matrix has just 3 degrees of freedom). The representation is used in the global 3D geometry optimization procedures like calibrateCamera, stereoCalibrate, or solvePnP .
@Namespace(value="cv")
public static void Rodrigues(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
@Namespace(value="cv")
public static void Rodrigues(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat jacobian)
@Namespace(value="cv")
public static void Rodrigues(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv")
public static void Rodrigues(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat jacobian)
@Namespace(value="cv")
public static void Rodrigues(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.Mat srcPoints, @ByVal opencv_core.Mat dstPoints, int method, double ransacReprojThreshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat mask, int maxIters, double confidence)
srcPoints - Coordinates of the points in the original plane, a matrix of the type CV_32FC2
or vector&lt;Point2f&gt;. dstPoints - Coordinates of the points in the target plane, a matrix of the type CV_32FC2 or
a vector&lt;Point2f&gt;. method - Method used to compute a homography matrix. The following methods are possible:
- **0** - a regular method using all the points, i.e., the least squares method
- **RANSAC** - RANSAC-based robust method
- **LMEDS** - Least-Median robust method
- **RHO** - PROSAC-based robust methodransacReprojThreshold - Maximum allowed reprojection error to treat a point pair as an inlier
(used in the RANSAC and RHO methods only). That is, if
\f[\| \texttt{dstPoints} _i - \texttt{convertPointsHomogeneous} ( \texttt{H} * \texttt{srcPoints} _i) \|_2 > \texttt{ransacReprojThreshold}\f]
then the point \f$i\f$ is considered as an outlier. If srcPoints and dstPoints are measured in pixels,
it usually makes sense to set this parameter somewhere in the range of 1 to 10.mask - Optional output mask set by a robust method ( RANSAC or LMEDS ). Note that the input
mask values are ignored.maxIters - The maximum number of RANSAC iterations.confidence - Confidence level, between 0 and 1.
The function finds and returns the perspective transformation \f$H\f$ between the source and the destination planes:
\f[s_i \vecthree{x'_i}{y'_i}{1} \sim H \vecthree{x_i}{y_i}{1}\f]
so that the back-projection error
\f[\sum _i \left ( x'_i- \frac{h_{11} x_i + h_{12} y_i + h_{13}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2+ \left ( y'_i- \frac{h_{21} x_i + h_{22} y_i + h_{23}}{h_{31} x_i + h_{32} y_i + h_{33}} \right )^2\f]
is minimized. If the parameter method is set to the default value 0, the function uses all the point pairs to compute an initial homography estimate with a simple least-squares scheme.
However, if not all of the point pairs ( \f$srcPoints_i\f$, \f$dstPoints_i\f$ ) fit the rigid perspective transformation (that is, there are some outliers), this initial estimate will be poor. In this case, you can use one of the three robust methods. The methods RANSAC, LMeDS and RHO try many different random subsets of the corresponding point pairs (of four pairs each, collinear pairs are discarded), estimate the homography matrix using this subset and a simple least-squares algorithm, and then compute the quality/goodness of the computed homography (which is the number of inliers for RANSAC or the least median re-projection error for LMeDS). The best subset is then used to produce the initial estimate of the homography matrix and the mask of inliers/outliers.
Regardless of the method, robust or not, the computed homography matrix is refined further (using inliers only in case of a robust method) with the Levenberg-Marquardt method to reduce the re-projection error even more.
The methods RANSAC and RHO can handle practically any ratio of outliers but need a threshold to distinguish inliers from outliers. The method LMeDS does not need any threshold but it works correctly only when there are more than 50% of inliers. Finally, if there are no outliers and the noise is rather small, use the default method (method=0).
The function is used to find initial intrinsic and extrinsic matrices. Homography matrix is determined up to a scale. Thus, it is normalized so that \f$h_{33}=1\f$. Note that whenever an \f$H\f$ matrix cannot be estimated, an empty one will be returned.
\sa getAffineTransform, estimateAffine2D, estimateAffinePartial2D, getPerspectiveTransform, warpPerspective, perspectiveTransform
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.Mat srcPoints, @ByVal opencv_core.Mat dstPoints)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.UMat srcPoints, @ByVal opencv_core.UMat dstPoints, int method, double ransacReprojThreshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat mask, int maxIters, double confidence)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.UMat srcPoints, @ByVal opencv_core.UMat dstPoints)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.GpuMat srcPoints, @ByVal opencv_core.GpuMat dstPoints, int method, double ransacReprojThreshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat mask, int maxIters, double confidence)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.GpuMat srcPoints, @ByVal opencv_core.GpuMat dstPoints)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.Mat srcPoints, @ByVal opencv_core.Mat dstPoints, @ByVal opencv_core.Mat mask, int method, double ransacReprojThreshold)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.Mat srcPoints, @ByVal opencv_core.Mat dstPoints, @ByVal opencv_core.Mat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.UMat srcPoints, @ByVal opencv_core.UMat dstPoints, @ByVal opencv_core.UMat mask, int method, double ransacReprojThreshold)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.UMat srcPoints, @ByVal opencv_core.UMat dstPoints, @ByVal opencv_core.UMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.GpuMat srcPoints, @ByVal opencv_core.GpuMat dstPoints, @ByVal opencv_core.GpuMat mask, int method, double ransacReprojThreshold)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findHomography(@ByVal opencv_core.GpuMat srcPoints, @ByVal opencv_core.GpuMat dstPoints, @ByVal opencv_core.GpuMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Point3d RQDecomp3x3(@ByVal opencv_core.Mat src, @ByVal opencv_core.Mat mtxR, @ByVal opencv_core.Mat mtxQ, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat Qx, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat Qy, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat Qz)
src - 3x3 input matrix.mtxR - Output 3x3 upper-triangular matrix.mtxQ - Output 3x3 orthogonal matrix.Qx - Optional output 3x3 rotation matrix around x-axis.Qy - Optional output 3x3 rotation matrix around y-axis.Qz - Optional output 3x3 rotation matrix around z-axis.
The function computes a RQ decomposition using the given rotations. This function is used in decomposeProjectionMatrix to decompose the left 3x3 submatrix of a projection matrix into a camera and a rotation matrix.
It optionally returns three rotation matrices, one for each axis, and the three Euler angles in degrees (as the return value) that could be used in OpenGL. Note, there is always more than one sequence of rotations about the three principal axes that results in the same orientation of an object, e.g. see \cite Slabaugh . Returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
@Namespace(value="cv") @ByVal public static opencv_core.Point3d RQDecomp3x3(@ByVal opencv_core.Mat src, @ByVal opencv_core.Mat mtxR, @ByVal opencv_core.Mat mtxQ)
@Namespace(value="cv") @ByVal public static opencv_core.Point3d RQDecomp3x3(@ByVal opencv_core.UMat src, @ByVal opencv_core.UMat mtxR, @ByVal opencv_core.UMat mtxQ, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat Qx, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat Qy, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat Qz)
@Namespace(value="cv") @ByVal public static opencv_core.Point3d RQDecomp3x3(@ByVal opencv_core.UMat src, @ByVal opencv_core.UMat mtxR, @ByVal opencv_core.UMat mtxQ)
@Namespace(value="cv") @ByVal public static opencv_core.Point3d RQDecomp3x3(@ByVal opencv_core.GpuMat src, @ByVal opencv_core.GpuMat mtxR, @ByVal opencv_core.GpuMat mtxQ, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat Qx, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat Qy, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat Qz)
@Namespace(value="cv") @ByVal public static opencv_core.Point3d RQDecomp3x3(@ByVal opencv_core.GpuMat src, @ByVal opencv_core.GpuMat mtxR, @ByVal opencv_core.GpuMat mtxQ)
@Namespace(value="cv")
public static void decomposeProjectionMatrix(@ByVal
opencv_core.Mat projMatrix,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat rotMatrix,
@ByVal
opencv_core.Mat transVect,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat rotMatrixX,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat rotMatrixY,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat rotMatrixZ,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat eulerAngles)
projMatrix - 3x4 input projection matrix P.cameraMatrix - Output 3x3 camera matrix K.rotMatrix - Output 3x3 external rotation matrix R.transVect - Output 4x1 translation vector T.rotMatrixX - Optional 3x3 rotation matrix around x-axis.rotMatrixY - Optional 3x3 rotation matrix around y-axis.rotMatrixZ - Optional 3x3 rotation matrix around z-axis.eulerAngles - Optional three-element vector containing three Euler angles of rotation in
degrees.
The function computes a decomposition of a projection matrix into a calibration and a rotation matrix and the position of a camera.
It optionally returns three rotation matrices, one for each axis, and three Euler angles that could be used in OpenGL. Note, there is always more than one sequence of rotations about the three principal axes that results in the same orientation of an object, e.g. see \cite Slabaugh . Returned three rotation matrices and corresponding three Euler angles are only one of the possible solutions.
The function is based on RQDecomp3x3 .
@Namespace(value="cv")
public static void decomposeProjectionMatrix(@ByVal
opencv_core.Mat projMatrix,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat rotMatrix,
@ByVal
opencv_core.Mat transVect)
@Namespace(value="cv")
public static void decomposeProjectionMatrix(@ByVal
opencv_core.UMat projMatrix,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat rotMatrix,
@ByVal
opencv_core.UMat transVect,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat rotMatrixX,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat rotMatrixY,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat rotMatrixZ,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat eulerAngles)
@Namespace(value="cv")
public static void decomposeProjectionMatrix(@ByVal
opencv_core.UMat projMatrix,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat rotMatrix,
@ByVal
opencv_core.UMat transVect)
@Namespace(value="cv")
public static void decomposeProjectionMatrix(@ByVal
opencv_core.GpuMat projMatrix,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat rotMatrix,
@ByVal
opencv_core.GpuMat transVect,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat rotMatrixX,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat rotMatrixY,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat rotMatrixZ,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat eulerAngles)
@Namespace(value="cv")
public static void decomposeProjectionMatrix(@ByVal
opencv_core.GpuMat projMatrix,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat rotMatrix,
@ByVal
opencv_core.GpuMat transVect)
@Namespace(value="cv")
public static void matMulDeriv(@ByVal
opencv_core.Mat A,
@ByVal
opencv_core.Mat B,
@ByVal
opencv_core.Mat dABdA,
@ByVal
opencv_core.Mat dABdB)
A - First multiplied matrix.B - Second multiplied matrix.dABdA - First output derivative matrix d(A\*B)/dA of size
\f$\texttt{A.rows*B.cols} \times {A.rows*A.cols}\f$ .dABdB - Second output derivative matrix d(A\*B)/dB of size
\f$\texttt{A.rows*B.cols} \times {B.rows*B.cols}\f$ .
The function computes partial derivatives of the elements of the matrix product \f$A*B\f$ with regard to the elements of each of the two input matrices. The function is used to compute the Jacobian matrices in stereoCalibrate but can also be used in any other similar optimization function.
@Namespace(value="cv")
public static void matMulDeriv(@ByVal
opencv_core.UMat A,
@ByVal
opencv_core.UMat B,
@ByVal
opencv_core.UMat dABdA,
@ByVal
opencv_core.UMat dABdB)
@Namespace(value="cv")
public static void matMulDeriv(@ByVal
opencv_core.GpuMat A,
@ByVal
opencv_core.GpuMat B,
@ByVal
opencv_core.GpuMat dABdA,
@ByVal
opencv_core.GpuMat dABdB)
@Namespace(value="cv")
public static void composeRT(@ByVal
opencv_core.Mat rvec1,
@ByVal
opencv_core.Mat tvec1,
@ByVal
opencv_core.Mat rvec2,
@ByVal
opencv_core.Mat tvec2,
@ByVal
opencv_core.Mat rvec3,
@ByVal
opencv_core.Mat tvec3,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dr3dr1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dr3dt1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dr3dr2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dr3dt2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dt3dr1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dt3dt1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dt3dr2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat dt3dt2)
rvec1 - First rotation vector.tvec1 - First translation vector.rvec2 - Second rotation vector.tvec2 - Second translation vector.rvec3 - Output rotation vector of the superposition.tvec3 - Output translation vector of the superposition.dr3dr1 - dr3dt1 - dr3dr2 - dr3dt2 - dt3dr1 - dt3dt1 - dt3dr2 - dt3dt2 - Optional output derivatives of rvec3 or tvec3 with regard to rvec1, rvec2, tvec1 and
tvec2, respectively.
The functions compute:
\f[\begin{array}{l} \texttt{rvec3} = \mathrm{rodrigues} ^{-1} \left ( \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \mathrm{rodrigues} ( \texttt{rvec1} ) \right ) \\ \texttt{tvec3} = \mathrm{rodrigues} ( \texttt{rvec2} ) \cdot \texttt{tvec1} + \texttt{tvec2} \end{array} ,\f]
where \f$\mathrm{rodrigues}\f$ denotes a rotation vector to a rotation matrix transformation, and \f$\mathrm{rodrigues}^{-1}\f$ denotes the inverse transformation. See Rodrigues for details.
Also, the functions can compute the derivatives of the output vectors with regards to the input vectors (see matMulDeriv ). The functions are used inside stereoCalibrate but can also be used in your own code where Levenberg-Marquardt or another gradient-based solver is used to optimize a function that contains a matrix multiplication.
@Namespace(value="cv")
public static void composeRT(@ByVal
opencv_core.Mat rvec1,
@ByVal
opencv_core.Mat tvec1,
@ByVal
opencv_core.Mat rvec2,
@ByVal
opencv_core.Mat tvec2,
@ByVal
opencv_core.Mat rvec3,
@ByVal
opencv_core.Mat tvec3)
@Namespace(value="cv")
public static void composeRT(@ByVal
opencv_core.UMat rvec1,
@ByVal
opencv_core.UMat tvec1,
@ByVal
opencv_core.UMat rvec2,
@ByVal
opencv_core.UMat tvec2,
@ByVal
opencv_core.UMat rvec3,
@ByVal
opencv_core.UMat tvec3,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dr3dr1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dr3dt1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dr3dr2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dr3dt2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dt3dr1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dt3dt1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dt3dr2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat dt3dt2)
@Namespace(value="cv")
public static void composeRT(@ByVal
opencv_core.UMat rvec1,
@ByVal
opencv_core.UMat tvec1,
@ByVal
opencv_core.UMat rvec2,
@ByVal
opencv_core.UMat tvec2,
@ByVal
opencv_core.UMat rvec3,
@ByVal
opencv_core.UMat tvec3)
@Namespace(value="cv")
public static void composeRT(@ByVal
opencv_core.GpuMat rvec1,
@ByVal
opencv_core.GpuMat tvec1,
@ByVal
opencv_core.GpuMat rvec2,
@ByVal
opencv_core.GpuMat tvec2,
@ByVal
opencv_core.GpuMat rvec3,
@ByVal
opencv_core.GpuMat tvec3,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dr3dr1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dr3dt1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dr3dr2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dr3dt2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dt3dr1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dt3dt1,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dt3dr2,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat dt3dt2)
@Namespace(value="cv")
public static void composeRT(@ByVal
opencv_core.GpuMat rvec1,
@ByVal
opencv_core.GpuMat tvec1,
@ByVal
opencv_core.GpuMat rvec2,
@ByVal
opencv_core.GpuMat tvec2,
@ByVal
opencv_core.GpuMat rvec3,
@ByVal
opencv_core.GpuMat tvec3)
@Namespace(value="cv")
public static void projectPoints(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.Mat imagePoints,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat jacobian,
double aspectRatio)
objectPoints - Array of object points, 3xN/Nx3 1-channel or 1xN/Nx1 3-channel (or
vector\rvec - Rotation vector. See Rodrigues for details.tvec - Translation vector.cameraMatrix - Camera matrix \f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .distCoeffs - Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is empty, the zero distortion coefficients are assumed.imagePoints - Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
vector\jacobian - Optional output 2Nx(10+numDistCoeffs) jacobian matrix of derivatives of image points with respect to components of the rotation vector, translation vector, focal lengths, coordinates of the principal point and the distortion coefficients.aspectRatio - Optional "fixed aspect ratio" parameter. If the parameter is not 0, the
function assumes that the aspect ratio (*fx/fy*) is fixed and correspondingly adjusts the jacobian
matrix.
The function computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of image points coordinates (as functions of all the input parameters) with respect to the particular parameters, intrinsic and/or extrinsic. The Jacobians are used during the global optimization in calibrateCamera, solvePnP, and stereoCalibrate . The function itself can also be used to compute a re-projection error given the current intrinsic and extrinsic parameters.
\note By setting rvec=tvec=(0,0,0) or by setting cameraMatrix to a 3x3 identity matrix, or by passing zero distortion coefficients, you can get various useful partial cases of the function. This means that you can compute the distorted coordinates for a sparse set of points or apply a perspective transformation (and also compute the derivatives) in the ideal zero-distortion setup.
@Namespace(value="cv")
public static void projectPoints(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.Mat imagePoints)
@Namespace(value="cv")
public static void projectPoints(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMat imagePoints,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat jacobian,
double aspectRatio)
@Namespace(value="cv")
public static void projectPoints(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMat imagePoints)
@Namespace(value="cv")
public static void projectPoints(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat jacobian,
double aspectRatio)
@Namespace(value="cv")
public static void projectPoints(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMat imagePoints)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnP(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec,
@Cast(value="bool")
boolean useExtrinsicGuess,
int flags)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector\cameraMatrix - Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .distCoeffs - Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.rvec - Output rotation vector (see \ref Rodrigues ) that, together with tvec , brings points from
the model coordinate system to the camera coordinate system.tvec - Output translation vector.useExtrinsicGuess - Parameter used for #SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.flags - Method for solving a PnP problem:
- **SOLVEPNP_ITERATIVE** Iterative method is based on Levenberg-Marquardt optimization. In
this case the function finds such a pose that minimizes reprojection error, that is the sum
of squared distances between the observed projections imagePoints and the projected (using
projectPoints ) objectPoints .
- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
"Complete Solution Classification for the Perspective-Three-Point Problem" (\cite gao2003complete).
In this case the function requires exactly four object and image points.
- **SOLVEPNP_AP3P** Method is based on the paper of T. Ke, S. Roumeliotis
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (\cite Ke17).
In this case the function requires exactly four object and image points.
- **SOLVEPNP_EPNP** Method has been introduced by F.Moreno-Noguer, V.Lepetit and P.Fua in the
paper "EPnP: Efficient Perspective-n-Point Camera Pose Estimation" (\cite lepetit2009epnp).
- **SOLVEPNP_DLS** Method is based on the paper of Joel A. Hesch and Stergios I. Roumeliotis.
"A Direct Least-Squares (DLS) Method for PnP" (\cite hesch2011direct).
- **SOLVEPNP_UPNP** Method is based on the paper of A.Penate-Sanchez, J.Andrade-Cetto,
F.Moreno-Noguer. "Exhaustive Linearization for Robust Camera Pose and Focal Length
Estimation" (\cite penate2013exhaustive). In this case the function also estimates the parameters \f$f_x\f$ and \f$f_y\f$
assuming that both have the same value. Then the cameraMatrix is updated with the estimated
focal length.
- **SOLVEPNP_AP3P** Method is based on the paper of Tong Ke and Stergios I. Roumeliotis.
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (\cite Ke17). In this case the
function requires exactly four object and image points.
The function estimates the object pose given a set of object points, their corresponding image projections, as well as the camera matrix and the distortion coefficients, see the figure below (more precisely, the X-axis of the camera frame is pointing to the right, the Y-axis downward and the Z-axis forward).

Points expressed in the world frame \f$ \bf{X}_w \f$ are projected into the image plane \f$ \left[ u, v \right] \f$ using the perspective projection model \f$ \Pi \f$ and the camera intrinsic parameters matrix \f$ \bf{A} \f$:
\f[ \begin{align*} \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} &= \bf{A} \hspace{0.1em} \Pi \hspace{0.2em} ^{c}\bf{M}_w \begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix} \\ \begin{bmatrix} u \\ v \\ 1 \end{bmatrix} &= \begin{bmatrix} f_x & 0 & c_x \\ 0 & f_y & c_y \\ 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} 1 & 0 & 0 & 0 \\ 0 & 1 & 0 & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} \begin{bmatrix} r_{11} & r_{12} & r_{13} & t_x \\ r_{21} & r_{22} & r_{23} & t_y \\ r_{31} & r_{32} & r_{33} & t_z \\ 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix} \end{align*} \f]
The estimated pose is thus the rotation (rvec) and the translation (tvec) vectors that allow to transform
a 3D point expressed in the world frame into the camera frame:
\f[ \begin{align*} \begin{bmatrix} X_c \\ Y_c \\ Z_c \\ 1 \end{bmatrix} &= \hspace{0.2em} ^{c}\bf{M}_w \begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix} \\ \begin{bmatrix} X_c \\ Y_c \\ Z_c \\ 1 \end{bmatrix} &= \begin{bmatrix} r_{11} & r_{12} & r_{13} & t_x \\ r_{21} & r_{22} & r_{23} & t_y \\ r_{31} & r_{32} & r_{33} & t_z \\ 0 & 0 & 0 & 1 \end{bmatrix} \begin{bmatrix} X_{w} \\ Y_{w} \\ Z_{w} \\ 1 \end{bmatrix} \end{align*} \f]
\note
- An example of how to use solvePnP for planar augmented reality can be found at
opencv_source_code/samples/python/plane_ar.py
- If you are using Python:
- Numpy array slices won't work as input because solvePnP requires contiguous
arrays (enforced by the assertion using cv::Mat::checkVector() around line 55 of
modules/calib3d/src/solvepnp.cpp version 2.4.9)
- The P3P algorithm requires image points to be in an array of shape (N,1,2) due
to its calling of cv::undistortPoints (around line 75 of modules/calib3d/src/solvepnp.cpp version 2.4.9)
which requires 2-channel information.
- Thus, given some data D = np.array(...) where D.shape = (N,M), in order to use a subset of
it as, e.g., imagePoints, one must effectively copy it into a new array: imagePoints =
np.ascontiguousarray(D[:,:2]).reshape((N,1,2))
- The methods **SOLVEPNP_DLS** and **SOLVEPNP_UPNP** cannot be used as the current implementations are
unstable and sometimes give completely wrong results. If you pass one of these two
flags, **SOLVEPNP_EPNP** method will be used instead.
- The minimum number of points is 4 in the general case. In the case of **SOLVEPNP_P3P** and **SOLVEPNP_AP3P**
methods, it is required to use exactly 4 points (the first 3 points are used to estimate all the solutions
of the P3P problem, the last one is used to retain the best solution that minimizes the reprojection error).
- With **SOLVEPNP_ITERATIVE** method and useExtrinsicGuess=true, the minimum number of points is 3 (3 points
are sufficient to compute a pose but there are up to 4 solutions). The initial solution should be close to the
global solution to converge.
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnP(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnP(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec,
@Cast(value="bool")
boolean useExtrinsicGuess,
int flags)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnP(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnP(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec,
@Cast(value="bool")
boolean useExtrinsicGuess,
int flags)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnP(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnPRansac(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec,
@Cast(value="bool")
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat inliers,
int flags)
objectPoints - Array of object points in the object coordinate space, Nx3 1-channel or
1xN/Nx1 3-channel, where N is the number of points. vector\imagePoints - Array of corresponding image points, Nx2 1-channel or 1xN/Nx1 2-channel,
where N is the number of points. vector\cameraMatrix - Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .distCoeffs - Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.rvec - Output rotation vector (see Rodrigues ) that, together with tvec , brings points from
the model coordinate system to the camera coordinate system.tvec - Output translation vector.useExtrinsicGuess - Parameter used for SOLVEPNP_ITERATIVE. If true (1), the function uses
the provided rvec and tvec values as initial approximations of the rotation and translation
vectors, respectively, and further optimizes them.iterationsCount - Number of iterations.reprojectionError - Inlier threshold value used by the RANSAC procedure. The parameter value
is the maximum allowed distance between the observed and computed point projections to consider it
an inlier.confidence - The probability that the algorithm produces a useful result.inliers - Output vector that contains indices of inliers in objectPoints and imagePoints .flags - Method for solving a PnP problem (see solvePnP ).
The function estimates an object pose given a set of object points, their corresponding image projections, as well as the camera matrix and the distortion coefficients. This function finds such a pose that minimizes reprojection error, that is, the sum of squared distances between the observed projections imagePoints and the projected (using projectPoints ) objectPoints. The use of RANSAC makes the function resistant to outliers.
\note - An example of how to use solvePNPRansac for object detection can be found at opencv_source_code/samples/cpp/tutorial_code/calib3d/real_time_pose_estimation/ - The default method used to estimate the camera pose for the Minimal Sample Sets step is #SOLVEPNP_EPNP. Exceptions are: - if you choose #SOLVEPNP_P3P or #SOLVEPNP_AP3P, these methods will be used. - if the number of input points is equal to 4, #SOLVEPNP_P3P is used. - The method used to estimate the camera pose using all the inliers is defined by the flags parameters unless it is equal to #SOLVEPNP_P3P or #SOLVEPNP_AP3P. In this case, the method #SOLVEPNP_EPNP will be used instead.
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnPRansac(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnPRansac(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec,
@Cast(value="bool")
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat inliers,
int flags)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnPRansac(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnPRansac(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec,
@Cast(value="bool")
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat inliers,
int flags)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean solvePnPRansac(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags)
objectPoints - Array of object points in the object coordinate space, 3x3 1-channel or
1x3/3x1 3-channel. vector\imagePoints - Array of corresponding image points, 3x2 1-channel or 1x3/3x1 2-channel.
vector\cameraMatrix - Input camera matrix \f$A = \vecthreethree{fx}{0}{cx}{0}{fy}{cy}{0}{0}{1}\f$ .distCoeffs - Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.rvecs - Output rotation vectors (see Rodrigues ) that, together with tvecs , brings points from
the model coordinate system to the camera coordinate system. A P3P problem has up to 4 solutions.tvecs - Output translation vectors.flags - Method for solving a P3P problem:
- **SOLVEPNP_P3P** Method is based on the paper of X.S. Gao, X.-R. Hou, J. Tang, H.-F. Chang
"Complete Solution Classification for the Perspective-Three-Point Problem" (\cite gao2003complete).
- **SOLVEPNP_AP3P** Method is based on the paper of Tong Ke and Stergios I. Roumeliotis.
"An Efficient Algebraic Solution to the Perspective-Three-Point Problem" (\cite Ke17).
The function estimates the object pose given 3 object points, their corresponding image projections, as well as the camera matrix and the distortion coefficients.
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags)
@Namespace(value="cv")
public static int solveP3P(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags)
@Namespace(value="cv") @ByVal public static opencv_core.Mat initCameraMatrix2D(@ByVal opencv_core.MatVector objectPoints, @ByVal opencv_core.MatVector imagePoints, @ByVal opencv_core.Size imageSize, double aspectRatio)
objectPoints - Vector of vectors of the calibration pattern points in the calibration pattern
coordinate space. In the old interface all the per-view vectors are concatenated. See
calibrateCamera for details.imagePoints - Vector of vectors of the projections of the calibration pattern points. In the
old interface all the per-view vectors are concatenated.imageSize - Image size in pixels used to initialize the principal point.aspectRatio - If it is zero or negative, both \f$f_x\f$ and \f$f_y\f$ are estimated independently.
Otherwise, \f$f_x = f_y * \texttt{aspectRatio}\f$ .
The function estimates and returns an initial camera matrix for the camera calibration process. Currently, the function only supports planar calibration patterns, which are patterns where each object point has z-coordinate =0.
@Namespace(value="cv") @ByVal public static opencv_core.Mat initCameraMatrix2D(@ByVal opencv_core.MatVector objectPoints, @ByVal opencv_core.MatVector imagePoints, @ByVal opencv_core.Size imageSize)
@Namespace(value="cv") @ByVal public static opencv_core.Mat initCameraMatrix2D(@ByVal opencv_core.UMatVector objectPoints, @ByVal opencv_core.UMatVector imagePoints, @ByVal opencv_core.Size imageSize, double aspectRatio)
@Namespace(value="cv") @ByVal public static opencv_core.Mat initCameraMatrix2D(@ByVal opencv_core.UMatVector objectPoints, @ByVal opencv_core.UMatVector imagePoints, @ByVal opencv_core.Size imageSize)
@Namespace(value="cv") @ByVal public static opencv_core.Mat initCameraMatrix2D(@ByVal opencv_core.GpuMatVector objectPoints, @ByVal opencv_core.GpuMatVector imagePoints, @ByVal opencv_core.Size imageSize, double aspectRatio)
@Namespace(value="cv") @ByVal public static opencv_core.Mat initCameraMatrix2D(@ByVal opencv_core.GpuMatVector objectPoints, @ByVal opencv_core.GpuMatVector imagePoints, @ByVal opencv_core.Size imageSize)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findChessboardCorners(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat corners,
int flags)
image - Source chessboard view. It must be an 8-bit grayscale or color image.patternSize - Number of inner corners per a chessboard row and column
( patternSize = cvSize(points_per_row,points_per_column) = cvSize(columns,rows) ).corners - Output array of detected corners.flags - Various operation flags that can be zero or a combination of the following values:
- **CALIB_CB_ADAPTIVE_THRESH** Use adaptive thresholding to convert the image to black
and white, rather than a fixed threshold level (computed from the average image brightness).
- **CALIB_CB_NORMALIZE_IMAGE** Normalize the image gamma with equalizeHist before
applying fixed or adaptive thresholding.
- **CALIB_CB_FILTER_QUADS** Use additional criteria (like contour area, perimeter,
square-like shape) to filter out false quads extracted at the contour retrieval stage.
- **CALIB_CB_FAST_CHECK** Run a fast check on the image that looks for chessboard corners,
and shortcut the call if none is found. This can drastically speed up the call in the
degenerate condition when no chessboard is observed.
The function attempts to determine whether the input image is a view of the chessboard pattern and locate the internal chessboard corners. The function returns a non-zero value if all of the corners are found and they are placed in a certain order (row by row, left to right in every row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0. For example, a regular chessboard has 8 x 8 squares and 7 x 7 internal corners, that is, points where the black squares touch each other. The detected coordinates are approximate, and to determine their positions more accurately, the function calls cornerSubPix. You also may use the function cornerSubPix with different parameters if returned coordinates are not accurate enough.
Sample usage of detecting and drawing chessboard corners: :
Size patternsize(8,6); //interior number of corners
Mat gray = ....; //source image
vector<Point2f> corners; //this will be filled by the detected corners
//CALIB_CB_FAST_CHECK saves a lot of time on images
//that do not contain any chessboard corners
bool patternfound = findChessboardCorners(gray, patternsize, corners,
CALIB_CB_ADAPTIVE_THRESH + CALIB_CB_NORMALIZE_IMAGE
+ CALIB_CB_FAST_CHECK);
if(patternfound)
cornerSubPix(gray, corners, Size(11, 11), Size(-1, -1),
TermCriteria(CV_TERMCRIT_EPS + CV_TERMCRIT_ITER, 30, 0.1));
drawChessboardCorners(img, patternsize, Mat(corners), patternfound);
\note The function requires white space (like a square-thick border, the wider the better) around
the board to make the detection more robust in various environments. Otherwise, if there is no
border and the background is dark, the outer black squares cannot be segmented properly and so the
square grouping and ordering algorithm fails.
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findChessboardCorners(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat corners)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findChessboardCorners(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat corners,
int flags)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findChessboardCorners(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat corners)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findChessboardCorners(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat corners,
int flags)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findChessboardCorners(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat corners)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean find4QuadCornerSubpix(@ByVal
opencv_core.Mat img,
@ByVal
opencv_core.Mat corners,
@ByVal
opencv_core.Size region_size)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean find4QuadCornerSubpix(@ByVal
opencv_core.UMat img,
@ByVal
opencv_core.UMat corners,
@ByVal
opencv_core.Size region_size)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean find4QuadCornerSubpix(@ByVal
opencv_core.GpuMat img,
@ByVal
opencv_core.GpuMat corners,
@ByVal
opencv_core.Size region_size)
@Namespace(value="cv")
public static void drawChessboardCorners(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat corners,
@Cast(value="bool")
boolean patternWasFound)
image - Destination image. It must be an 8-bit color image.patternSize - Number of inner corners per a chessboard row and column
(patternSize = cv::Size(points_per_row,points_per_column)).corners - Array of detected corners, the output of findChessboardCorners.patternWasFound - Parameter indicating whether the complete board was found or not. The
return value of findChessboardCorners should be passed here.
The function draws individual chessboard corners detected either as red circles if the board was not found, or as colored corners connected with lines if the board was found.
@Namespace(value="cv")
public static void drawChessboardCorners(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat corners,
@Cast(value="bool")
boolean patternWasFound)
@Namespace(value="cv")
public static void drawChessboardCorners(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat corners,
@Cast(value="bool")
boolean patternWasFound)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector,
@ByVal
opencv_calib3d.CirclesGridFinderParameters parameters)
image - grid view of input circles; it must be an 8-bit grayscale or color image.patternSize - number of circles per row and column
( patternSize = Size(points_per_row, points_per_column) ).centers - output array of detected centers.flags - various operation flags that can be one of the following values:
- **CALIB_CB_SYMMETRIC_GRID** uses symmetric pattern of circles.
- **CALIB_CB_ASYMMETRIC_GRID** uses asymmetric pattern of circles.
- **CALIB_CB_CLUSTERING** uses a special algorithm for grid detection. It is more robust to
perspective distortions but much more sensitive to background clutter.blobDetector - feature detector that finds blobs like dark circles on light background.parameters - struct for finding circles in a grid pattern.
The function attempts to determine whether the input image contains a grid of circles. If it is, the function locates centers of the circles. The function returns a non-zero value if all of the centers have been found and they have been placed in a certain order (row by row, left to right in every row). Otherwise, if the function fails to find all the corners or reorder them, it returns 0.
Sample usage of detecting and drawing the centers of circles: :
Size patternsize(7,7); //number of centers
Mat gray = ....; //source image
vector<Point2f> centers; //this will be filled by the detected centers
bool patternfound = findCirclesGrid(gray, patternsize, centers);
drawChessboardCorners(img, patternsize, Mat(centers), patternfound);
\note The function requires white space (like a square-thick border, the wider the better) around
the board to make the detection more robust in various environments.
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector,
@ByVal
opencv_calib3d.CirclesGridFinderParameters parameters)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector,
@ByVal
opencv_calib3d.CirclesGridFinderParameters parameters)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid2(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector,
@ByVal
opencv_calib3d.CirclesGridFinderParameters2 parameters)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid2(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector,
@ByVal
opencv_calib3d.CirclesGridFinderParameters2 parameters)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid2(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector,
@ByVal
opencv_calib3d.CirclesGridFinderParameters2 parameters)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.Mat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.Mat centers)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.UMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.UMat centers)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat centers,
int flags,
@Cast(value="cv::FeatureDetector*") @opencv_core.Ptr
opencv_features2d.Feature2D blobDetector)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean findCirclesGrid(@ByVal
opencv_core.GpuMat image,
@ByVal
opencv_core.Size patternSize,
@ByVal
opencv_core.GpuMat centers)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
@ByVal
opencv_core.Mat stdDeviationsIntrinsics,
@ByVal
opencv_core.Mat stdDeviationsExtrinsics,
@ByVal
opencv_core.Mat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
objectPoints - In the new interface it is a vector of vectors of calibration pattern points in
the calibration pattern coordinate space (e.g. std::vector<std::vector<cv::Vec3f>>).imagePoints - In the new interface it is a vector of vectors of the projections of calibration
pattern points (e.g. std::vector<std::vector<cv::Vec2f>>).imageSize - Size of the image used only to initialize the intrinsic camera matrix.cameraMatrix - Output 3x3 floating-point camera matrix
\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If CV_CALIB_USE_INTRINSIC_GUESS
and/or CALIB_FIX_ASPECT_RATIO are specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.distCoeffs - Output vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements.rvecs - Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view
(e.g. std::vector<cv::Mat>).tvecs - Output vector of translation vectors estimated for each pattern view.stdDeviationsIntrinsics - Output vector of standard deviations estimated for intrinsic parameters.
Order of deviations values:
\f$(f_x, f_y, c_x, c_y, k_1, k_2, p_1, p_2, k_3, k_4, k_5, k_6 , s_1, s_2, s_3,
s_4, \tau_x, \tau_y)\f$ If one of the parameters is not estimated, its deviation is equal to zero.stdDeviationsExtrinsics - Output vector of standard deviations estimated for extrinsic parameters.
Order of deviations values: \f$(R_1, T_1, \dotsc , R_M, T_M)\f$ where M is number of pattern views,
\f$R_i, T_i\f$ are concatenated 1x3 vectors.perViewErrors - Output vector of the RMS re-projection error estimated for each pattern view.flags - Different flags that may be zero or a combination of the following values:
- **CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
Note, that if intrinsic parameters are known, there is no need to use this function just to
estimate extrinsic parameters. Use solvePnP instead.
- **CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
optimization. It stays at the center or at a different location specified when
CALIB_USE_INTRINSIC_GUESS is set too.
- **CALIB_FIX_ASPECT_RATIO** The functions considers only fy as a free parameter. The
ratio fx/fy stays the same as in the input cameraMatrix . When
CALIB_USE_INTRINSIC_GUESS is not set, the actual input values of fx and fy are
ignored, only their ratio is computed and used further.
- **CALIB_ZERO_TANGENT_DIST** Tangential distortion coefficients \f$(p_1, p_2)\f$ are set
to zeros and stay zero.
- **CALIB_FIX_K1,...,CALIB_FIX_K6** The corresponding radial distortion
coefficient is not changed during the optimization. If CALIB_USE_INTRINSIC_GUESS is
set, the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CALIB_RATIONAL_MODEL** Coefficients k4, k5, and k6 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the rational model and return 8 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the thin prism model and return 12 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during
the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.criteria - Termination criteria for the iterative optimization algorithm.
The function estimates the intrinsic camera parameters and extrinsic parameters for each of the views. The algorithm is based on \cite Zhang2000 and \cite BouguetMCT . The coordinates of 3D object points and their corresponding 2D projections in each view must be specified. That may be achieved by using an object with a known geometry and easily detectable feature points. Such an object is called a calibration rig or calibration pattern, and OpenCV has built-in support for a chessboard as a calibration rig (see findChessboardCorners ). Currently, initialization of intrinsic parameters (when CALIB_USE_INTRINSIC_GUESS is not set) is only implemented for planar calibration patterns (where Z-coordinates of the object points must be all zeros). 3D calibration rigs can also be used as long as initial cameraMatrix is provided.
The algorithm performs the following steps:
- Compute the initial intrinsic parameters (the option only available for planar calibration patterns) or read them from the input parameters. The distortion coefficients are all set to zeros initially unless some of CALIB_FIX_K? are specified.
- Estimate the initial camera pose as if the intrinsic parameters have been already known. This is done using solvePnP .
- Run the global Levenberg-Marquardt optimization algorithm to minimize the reprojection error, that is, the total sum of squared distances between the observed feature points imagePoints and the projected (using the current estimates for camera parameters and the poses) object points objectPoints. See projectPoints for details.
\note If you use a non-square (=non-NxN) grid and findChessboardCorners for calibration, and calibrateCamera returns bad values (zero distortion coefficients, an image center very far from (w/2-0.5,h/2-0.5), and/or large differences between \f$f_x\f$ and \f$f_y\f$ (ratios of 10:1 or more)), then you have probably used patternSize=cvSize(rows,cols) instead of using patternSize=cvSize(cols,rows) in findChessboardCorners .
\sa findChessboardCorners, solvePnP, initCameraMatrix2D, stereoCalibrate, undistort
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
@ByVal
opencv_core.Mat stdDeviationsIntrinsics,
@ByVal
opencv_core.Mat stdDeviationsExtrinsics,
@ByVal
opencv_core.Mat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
@ByVal
opencv_core.Mat stdDeviationsIntrinsics,
@ByVal
opencv_core.Mat stdDeviationsExtrinsics,
@ByVal
opencv_core.Mat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
@ByVal
opencv_core.Mat stdDeviationsIntrinsics,
@ByVal
opencv_core.Mat stdDeviationsExtrinsics,
@ByVal
opencv_core.Mat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
@ByVal
opencv_core.Mat stdDeviationsIntrinsics,
@ByVal
opencv_core.Mat stdDeviationsExtrinsics,
@ByVal
opencv_core.Mat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
@ByVal
opencv_core.Mat stdDeviationsIntrinsics,
@ByVal
opencv_core.Mat stdDeviationsExtrinsics,
@ByVal
opencv_core.Mat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
@ByVal
opencv_core.UMat stdDeviationsIntrinsics,
@ByVal
opencv_core.UMat stdDeviationsExtrinsics,
@ByVal
opencv_core.UMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
@ByVal
opencv_core.UMat stdDeviationsIntrinsics,
@ByVal
opencv_core.UMat stdDeviationsExtrinsics,
@ByVal
opencv_core.UMat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
@ByVal
opencv_core.UMat stdDeviationsIntrinsics,
@ByVal
opencv_core.UMat stdDeviationsExtrinsics,
@ByVal
opencv_core.UMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
@ByVal
opencv_core.UMat stdDeviationsIntrinsics,
@ByVal
opencv_core.UMat stdDeviationsExtrinsics,
@ByVal
opencv_core.UMat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
@ByVal
opencv_core.UMat stdDeviationsIntrinsics,
@ByVal
opencv_core.UMat stdDeviationsExtrinsics,
@ByVal
opencv_core.UMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
@ByVal
opencv_core.UMat stdDeviationsIntrinsics,
@ByVal
opencv_core.UMat stdDeviationsExtrinsics,
@ByVal
opencv_core.UMat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
@ByVal
opencv_core.GpuMat stdDeviationsIntrinsics,
@ByVal
opencv_core.GpuMat stdDeviationsExtrinsics,
@ByVal
opencv_core.GpuMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
@ByVal
opencv_core.GpuMat stdDeviationsIntrinsics,
@ByVal
opencv_core.GpuMat stdDeviationsExtrinsics,
@ByVal
opencv_core.GpuMat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
@ByVal
opencv_core.GpuMat stdDeviationsIntrinsics,
@ByVal
opencv_core.GpuMat stdDeviationsExtrinsics,
@ByVal
opencv_core.GpuMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
@ByVal
opencv_core.GpuMat stdDeviationsIntrinsics,
@ByVal
opencv_core.GpuMat stdDeviationsExtrinsics,
@ByVal
opencv_core.GpuMat perViewErrors)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
@ByVal
opencv_core.GpuMat stdDeviationsIntrinsics,
@ByVal
opencv_core.GpuMat stdDeviationsExtrinsics,
@ByVal
opencv_core.GpuMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="calibrateCamera")
public static double calibrateCameraExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
@ByVal
opencv_core.GpuMat stdDeviationsIntrinsics,
@ByVal
opencv_core.GpuMat stdDeviationsExtrinsics,
@ByVal
opencv_core.GpuMat perViewErrors)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 30, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double calibrateCamera(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat distCoeffs,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
org.bytedeco.javacpp.DoublePointer fovx,
@ByRef
org.bytedeco.javacpp.DoublePointer fovy,
@ByRef
org.bytedeco.javacpp.DoublePointer focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
org.bytedeco.javacpp.DoublePointer aspectRatio)
cameraMatrix - Input camera matrix that can be estimated by calibrateCamera or stereoCalibrate.
imageSize - Input image size in pixels.
apertureWidth - Physical width in mm of the sensor.
apertureHeight - Physical height in mm of the sensor.
fovx - Output field of view in degrees along the horizontal sensor axis.
fovy - Output field of view in degrees along the vertical sensor axis.
focalLength - Focal length of the lens in mm.
principalPoint - Principal point in mm.
aspectRatio - \f$f_y/f_x\f$
The function computes various useful camera characteristics from the previously estimated camera matrix.
\note Do keep in mind that the unity measure 'mm' stands for whatever unit of measure one chooses for the chessboard pitch (it can thus be any value).
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
DoubleBuffer fovx,
@ByRef
DoubleBuffer fovy,
@ByRef
DoubleBuffer focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
DoubleBuffer aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
double[] fovx,
@ByRef
double[] fovy,
@ByRef
double[] focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
double[] aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
org.bytedeco.javacpp.DoublePointer fovx,
@ByRef
org.bytedeco.javacpp.DoublePointer fovy,
@ByRef
org.bytedeco.javacpp.DoublePointer focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
org.bytedeco.javacpp.DoublePointer aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
DoubleBuffer fovx,
@ByRef
DoubleBuffer fovy,
@ByRef
DoubleBuffer focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
DoubleBuffer aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
double[] fovx,
@ByRef
double[] fovy,
@ByRef
double[] focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
double[] aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
org.bytedeco.javacpp.DoublePointer fovx,
@ByRef
org.bytedeco.javacpp.DoublePointer fovy,
@ByRef
org.bytedeco.javacpp.DoublePointer focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
org.bytedeco.javacpp.DoublePointer aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
DoubleBuffer fovx,
@ByRef
DoubleBuffer fovy,
@ByRef
DoubleBuffer focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
DoubleBuffer aspectRatio)
@Namespace(value="cv")
public static void calibrationMatrixValues(@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
@ByRef
double[] fovx,
@ByRef
double[] fovy,
@ByRef
double[] focalLength,
@ByRef
opencv_core.Point2d principalPoint,
@ByRef
double[] aspectRatio)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
objectPoints - Vector of vectors of the calibration pattern points.
imagePoints1 - Vector of vectors of the projections of the calibration pattern points, observed by the first camera.
imagePoints2 - Vector of vectors of the projections of the calibration pattern points, observed by the second camera.
cameraMatrix1 - Input/output first camera matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
any of CALIB_USE_INTRINSIC_GUESS , CALIB_FIX_ASPECT_RATIO ,
CALIB_FIX_INTRINSIC , or CALIB_FIX_FOCAL_LENGTH are specified, some or all of the
matrix components must be initialized. See the flags description for details.
distCoeffs1 - Input/output vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. The output vector length depends on the flags.
cameraMatrix2 - Input/output second camera matrix. The parameter is similar to cameraMatrix1.
distCoeffs2 - Input/output lens distortion coefficients for the second camera. The parameter is similar to distCoeffs1.
imageSize - Size of the image used only to initialize the intrinsic camera matrix.
R - Output rotation matrix between the 1st and the 2nd camera coordinate systems.
T - Output translation vector between the coordinate systems of the cameras.
E - Output essential matrix.
F - Output fundamental matrix.
perViewErrors - Output vector of the RMS re-projection error estimated for each pattern view.
flags - Different flags that may be zero or a combination of the following values:
- **CALIB_FIX_INTRINSIC** Fix cameraMatrix? and distCoeffs? so that only R, T, E , and F
matrices are estimated.
- **CALIB_USE_INTRINSIC_GUESS** Optimize some or all of the intrinsic parameters
according to the specified flags. Initial values are provided by the user.
- **CALIB_USE_EXTRINSIC_GUESS** R, T contain valid initial values that are optimized further.
Otherwise R, T are initialized to the median value of the pattern views (each dimension separately).
- **CALIB_FIX_PRINCIPAL_POINT** Fix the principal points during the optimization.
- **CALIB_FIX_FOCAL_LENGTH** Fix \f$f^{(j)}_x\f$ and \f$f^{(j)}_y\f$ .
- **CALIB_FIX_ASPECT_RATIO** Optimize \f$f^{(j)}_y\f$ . Fix the ratio \f$f^{(j)}_x/f^{(j)}_y\f$
.
- **CALIB_SAME_FOCAL_LENGTH** Enforce \f$f^{(0)}_x=f^{(1)}_x\f$ and \f$f^{(0)}_y=f^{(1)}_y\f$ .
- **CALIB_ZERO_TANGENT_DIST** Set tangential distortion coefficients for each camera to
zeros and fix there.
- **CALIB_FIX_K1,...,CALIB_FIX_K6** Do not change the corresponding radial
distortion coefficient during the optimization. If CALIB_USE_INTRINSIC_GUESS is set,
the coefficient from the supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CALIB_RATIONAL_MODEL** Enable coefficients k4, k5, and k6. To provide the backward
compatibility, this extra flag should be explicitly specified to make the calibration
function use the rational model and return 8 coefficients. If the flag is not set, the
function computes and returns only 5 distortion coefficients.
- **CALIB_THIN_PRISM_MODEL** Coefficients s1, s2, s3 and s4 are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the thin prism model and return 12 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_S1_S2_S3_S4** The thin prism distortion coefficients are not changed during
the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.
- **CALIB_TILTED_MODEL** Coefficients tauX and tauY are enabled. To provide the
backward compatibility, this extra flag should be explicitly specified to make the
calibration function use the tilted sensor model and return 14 coefficients. If the flag is not
set, the function computes and returns only 5 distortion coefficients.
- **CALIB_FIX_TAUX_TAUY** The coefficients of the tilted sensor model are not changed during
the optimization. If CALIB_USE_INTRINSIC_GUESS is set, the coefficient from the
supplied distCoeffs matrix is used. Otherwise, it is set to 0.criteria - Termination criteria for the iterative optimization algorithm.
The function estimates transformation between two cameras making a stereo pair. If you have a stereo camera where the relative position and orientation of two cameras is fixed, and if you computed poses of an object relative to the first camera and to the second camera, (R1, T1) and (R2, T2), respectively (this can be done with solvePnP ), then those poses definitely relate to each other. This means that, given ( \f$R_1\f$,\f$T_1\f$ ), it should be possible to compute ( \f$R_2\f$,\f$T_2\f$ ). You only need to know the position and orientation of the second camera relative to the first camera. This is what the described function does. It computes ( \f$R\f$,\f$T\f$ ) so that:
\f[R_2=R*R_1\f] \f[T_2=R*T_1 + T,\f]
Optionally, it computes the essential matrix E:
\f[E= \vecthreethree{0}{-T_2}{T_1}{T_2}{0}{-T_0}{-T_1}{T_0}{0} *R\f]
where \f$T_i\f$ are components of the translation vector \f$T\f$ : \f$T=[T_0, T_1, T_2]^T\f$ . And the function can also compute the fundamental matrix F:
\f[F = cameraMatrix2^{-T} E cameraMatrix1^{-1}\f]
Besides the stereo-related information, the function can also perform a full calibration of each of two cameras. However, due to the high dimensionality of the parameter space and noise in the input data, the function can diverge from the correct solution. If the intrinsic parameters can be estimated with high accuracy for each of the cameras individually (for example, using calibrateCamera ), you are recommended to do so and then pass CALIB_FIX_INTRINSIC flag to the function along with the computed intrinsic parameters. Otherwise, if all the parameters are estimated at once, it makes sense to restrict some parameters, for example, pass CALIB_SAME_FOCAL_LENGTH and CALIB_ZERO_TANGENT_DIST flags, which is usually a reasonable assumption.
Similarly to calibrateCamera , the function minimizes the total re-projection error for all the points in all the available views from both cameras. The function returns the final value of the re-projection error.
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat perViewErrors)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat perViewErrors,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
@Name(value="stereoCalibrate")
public static double stereoCalibrateExtended(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat perViewErrors)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT+cv::TermCriteria::EPS, 30, 1e-6)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat F)
@Namespace(value="cv")
public static void stereoRectify(@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat Q,
int flags,
double alpha,
@ByVal(nullValue="cv::Size()")
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
cameraMatrix1 - First camera matrix.
distCoeffs1 - First camera distortion parameters.
cameraMatrix2 - Second camera matrix.
distCoeffs2 - Second camera distortion parameters.
imageSize - Size of the image used for stereo calibration.
R - Rotation matrix between the coordinate systems of the first and the second cameras.
T - Translation vector between coordinate systems of the cameras.
R1 - Output 3x3 rectification transform (rotation matrix) for the first camera.
R2 - Output 3x3 rectification transform (rotation matrix) for the second camera.
P1 - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first camera.
P2 - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second camera.
Q - Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).
flags - Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.
alpha - Free scaling parameter. If it is -1 or absent, the function performs the default
scaling. Otherwise, the parameter should be between 0 and 1. alpha=0 means that the rectified
images are zoomed and shifted so that only valid pixels are visible (no black areas after
rectification). alpha=1 means that the rectified image is decimated and shifted so that all the
pixels from the original images from the cameras are retained in the rectified images (no source
image pixels are lost). Obviously, any intermediate value yields an intermediate result between
those two extreme cases.
newImageSize - New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to a larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
validPixROI1 - Optional output rectangle inside the rectified image where all the pixels
are valid. If alpha=0 , the ROI covers the whole image. Otherwise, it is likely to be smaller
(see the picture below).
validPixROI2 - Optional output rectangle inside the rectified image where all the pixels
are valid. If alpha=0 , the ROI covers the whole image. Otherwise, it is likely to be smaller
(see the picture below).
The function computes the rotation matrices for each camera that (virtually) make both camera image planes the same plane. Consequently, this makes all the epipolar lines parallel and thus simplifies the dense stereo correspondence problem. The function takes the matrices computed by stereoCalibrate as input. As output, it provides two rotation matrices and also two projection matrices in the new coordinates. The function distinguishes the following two cases:
- **Horizontal stereo**: the first and the second camera views are shifted relative to each other mainly along the x axis (with possible small vertical shift). In the rectified images, the corresponding epipolar lines in the left and right cameras are horizontal and have the same y-coordinate. P1 and P2 look like:
\f[\texttt{P1} = \begin{bmatrix} f & 0 & cx_1 & 0 \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\f]
\f[\texttt{P2} = \begin{bmatrix} f & 0 & cx_2 & T_x*f \\ 0 & f & cy & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
where \f$T_x\f$ is a horizontal shift between the cameras and \f$cx_1=cx_2\f$ if CALIB_ZERO_DISPARITY is set.
- **Vertical stereo**: the first and the second camera views are shifted relative to each other mainly in vertical direction (and probably a bit in the horizontal direction too). The epipolar lines in the rectified images are vertical and have the same x-coordinate. P1 and P2 look like:
\f[\texttt{P1} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_1 & 0 \\ 0 & 0 & 1 & 0 \end{bmatrix}\f]
\f[\texttt{P2} = \begin{bmatrix} f & 0 & cx & 0 \\ 0 & f & cy_2 & T_y*f \\ 0 & 0 & 1 & 0 \end{bmatrix} ,\f]
where \f$T_y\f$ is a vertical shift between the cameras and \f$cy_1=cy_2\f$ if CALIB_ZERO_DISPARITY is set.
As you can see, the first three columns of P1 and P2 will effectively be the new "rectified" camera matrices. The matrices, together with R1 and R2 , can then be passed to initUndistortRectifyMap to initialize the rectification map for each camera.
See below the screenshot from the stereo_calib.cpp sample. Some red horizontal lines pass through the corresponding image regions. This means that the images are well rectified, which is what most stereo correspondence algorithms rely on. The green rectangles are roi1 and roi2 . You see that their interiors are all valid pixels.

@Namespace(value="cv")
public static void stereoRectify(@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat Q)
@Namespace(value="cv")
public static void stereoRectify(@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat Q,
int flags,
double alpha,
@ByVal(nullValue="cv::Size()")
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
@Namespace(value="cv")
public static void stereoRectify(@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat Q)
@Namespace(value="cv")
public static void stereoRectify(@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat Q,
int flags,
double alpha,
@ByVal(nullValue="cv::Size()")
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
@Namespace(value="cv")
public static void stereoRectify(@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat Q)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean stereoRectifyUncalibrated(@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Size imgSize,
@ByVal
opencv_core.Mat H1,
@ByVal
opencv_core.Mat H2,
double threshold)
points1 - Array of feature points in the first image.
points2 - The corresponding points in the second image. The same formats as in
findFundamentalMat are supported.
F - Input fundamental matrix. It can be computed from the same set of point pairs using
findFundamentalMat .
imgSize - Size of the image.
H1 - Output rectification homography matrix for the first image.
H2 - Output rectification homography matrix for the second image.
threshold - Optional threshold used to filter out the outliers. If the parameter is greater
than zero, all the point pairs that do not comply with the epipolar geometry (that is, the points
for which \f$|\texttt{points2[i]}^T*\texttt{F}*\texttt{points1[i]}|>\texttt{threshold}\f$ ) are
rejected prior to computing the homographies. Otherwise, all the points are considered inliers.
The function computes the rectification transformations without knowing intrinsic parameters of the cameras and their relative position in the space, which explains the suffix "uncalibrated". Another related difference from stereoRectify is that the function outputs not the rectification transformations in the object (3D) space, but the planar perspective transformations encoded by the homography matrices H1 and H2 . The function implements the algorithm \cite Hartley99 .
\note While the algorithm does not need to know the intrinsic parameters of the cameras, it heavily depends on the epipolar geometry. Therefore, if the camera lenses have a significant distortion, it would be better to correct it before computing the fundamental matrix and calling this function. For example, distortion coefficients can be estimated for each head of stereo camera separately by using calibrateCamera . Then, the images can be corrected using undistort , or just the point coordinates can be corrected with undistortPoints .
@Namespace(value="cv")
@Cast(value="bool")
public static boolean stereoRectifyUncalibrated(@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Size imgSize,
@ByVal
opencv_core.Mat H1,
@ByVal
opencv_core.Mat H2)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean stereoRectifyUncalibrated(@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.Size imgSize,
@ByVal
opencv_core.UMat H1,
@ByVal
opencv_core.UMat H2,
double threshold)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean stereoRectifyUncalibrated(@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.Size imgSize,
@ByVal
opencv_core.UMat H1,
@ByVal
opencv_core.UMat H2)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean stereoRectifyUncalibrated(@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.Size imgSize,
@ByVal
opencv_core.GpuMat H1,
@ByVal
opencv_core.GpuMat H2,
double threshold)
@Namespace(value="cv")
@Cast(value="bool")
public static boolean stereoRectifyUncalibrated(@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.Size imgSize,
@ByVal
opencv_core.GpuMat H1,
@ByVal
opencv_core.GpuMat H2)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Mat cameraMatrix3,
@ByVal
opencv_core.Mat distCoeffs3,
@ByVal
opencv_core.MatVector imgpt1,
@ByVal
opencv_core.MatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R12,
@ByVal
opencv_core.Mat T12,
@ByVal
opencv_core.Mat R13,
@ByVal
opencv_core.Mat T13,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat R3,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat P3,
@ByVal
opencv_core.Mat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Mat cameraMatrix3,
@ByVal
opencv_core.Mat distCoeffs3,
@ByVal
opencv_core.UMatVector imgpt1,
@ByVal
opencv_core.UMatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R12,
@ByVal
opencv_core.Mat T12,
@ByVal
opencv_core.Mat R13,
@ByVal
opencv_core.Mat T13,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat R3,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat P3,
@ByVal
opencv_core.Mat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.Mat cameraMatrix1,
@ByVal
opencv_core.Mat distCoeffs1,
@ByVal
opencv_core.Mat cameraMatrix2,
@ByVal
opencv_core.Mat distCoeffs2,
@ByVal
opencv_core.Mat cameraMatrix3,
@ByVal
opencv_core.Mat distCoeffs3,
@ByVal
opencv_core.GpuMatVector imgpt1,
@ByVal
opencv_core.GpuMatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R12,
@ByVal
opencv_core.Mat T12,
@ByVal
opencv_core.Mat R13,
@ByVal
opencv_core.Mat T13,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat R3,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat P3,
@ByVal
opencv_core.Mat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.UMat cameraMatrix3,
@ByVal
opencv_core.UMat distCoeffs3,
@ByVal
opencv_core.MatVector imgpt1,
@ByVal
opencv_core.MatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R12,
@ByVal
opencv_core.UMat T12,
@ByVal
opencv_core.UMat R13,
@ByVal
opencv_core.UMat T13,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat R3,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat P3,
@ByVal
opencv_core.UMat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.UMat cameraMatrix3,
@ByVal
opencv_core.UMat distCoeffs3,
@ByVal
opencv_core.UMatVector imgpt1,
@ByVal
opencv_core.UMatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R12,
@ByVal
opencv_core.UMat T12,
@ByVal
opencv_core.UMat R13,
@ByVal
opencv_core.UMat T13,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat R3,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat P3,
@ByVal
opencv_core.UMat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.UMat cameraMatrix1,
@ByVal
opencv_core.UMat distCoeffs1,
@ByVal
opencv_core.UMat cameraMatrix2,
@ByVal
opencv_core.UMat distCoeffs2,
@ByVal
opencv_core.UMat cameraMatrix3,
@ByVal
opencv_core.UMat distCoeffs3,
@ByVal
opencv_core.GpuMatVector imgpt1,
@ByVal
opencv_core.GpuMatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R12,
@ByVal
opencv_core.UMat T12,
@ByVal
opencv_core.UMat R13,
@ByVal
opencv_core.UMat T13,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat R3,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat P3,
@ByVal
opencv_core.UMat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.GpuMat cameraMatrix3,
@ByVal
opencv_core.GpuMat distCoeffs3,
@ByVal
opencv_core.MatVector imgpt1,
@ByVal
opencv_core.MatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R12,
@ByVal
opencv_core.GpuMat T12,
@ByVal
opencv_core.GpuMat R13,
@ByVal
opencv_core.GpuMat T13,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat R3,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat P3,
@ByVal
opencv_core.GpuMat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.GpuMat cameraMatrix3,
@ByVal
opencv_core.GpuMat distCoeffs3,
@ByVal
opencv_core.UMatVector imgpt1,
@ByVal
opencv_core.UMatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R12,
@ByVal
opencv_core.GpuMat T12,
@ByVal
opencv_core.GpuMat R13,
@ByVal
opencv_core.GpuMat T13,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat R3,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat P3,
@ByVal
opencv_core.GpuMat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv")
public static float rectify3Collinear(@ByVal
opencv_core.GpuMat cameraMatrix1,
@ByVal
opencv_core.GpuMat distCoeffs1,
@ByVal
opencv_core.GpuMat cameraMatrix2,
@ByVal
opencv_core.GpuMat distCoeffs2,
@ByVal
opencv_core.GpuMat cameraMatrix3,
@ByVal
opencv_core.GpuMat distCoeffs3,
@ByVal
opencv_core.GpuMatVector imgpt1,
@ByVal
opencv_core.GpuMatVector imgpt3,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R12,
@ByVal
opencv_core.GpuMat T12,
@ByVal
opencv_core.GpuMat R13,
@ByVal
opencv_core.GpuMat T13,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat R3,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat P3,
@ByVal
opencv_core.GpuMat Q,
double alpha,
@ByVal
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
@Namespace(value="cv") @ByVal public static opencv_core.Mat getOptimalNewCameraMatrix(@ByVal opencv_core.Mat cameraMatrix, @ByVal opencv_core.Mat distCoeffs, @ByVal opencv_core.Size imageSize, double alpha, @ByVal(nullValue="cv::Size()") opencv_core.Size newImgSize, opencv_core.Rect validPixROI, @Cast(value="bool") boolean centerPrincipalPoint)
cameraMatrix - Input camera matrix.
distCoeffs - Input vector of distortion coefficients
\f$(k_1, k_2, p_1, p_2[, k_3[, k_4, k_5, k_6 [, s_1, s_2, s_3, s_4[, \tau_x, \tau_y]]]])\f$ of
4, 5, 8, 12 or 14 elements. If the vector is NULL/empty, the zero distortion coefficients are
assumed.
imageSize - Original image size.
alpha - Free scaling parameter between 0 (when all the pixels in the undistorted image are
valid) and 1 (when all the source image pixels are retained in the undistorted image). See
stereoRectify for details.
newImgSize - Image size after rectification. By default, it is set to imageSize .
validPixROI - Optional output rectangle that outlines all-good-pixels region in the
undistorted image. See roi1, roi2 description in stereoRectify .
centerPrincipalPoint - Optional flag that indicates whether in the new camera matrix the
principal point should be at the image center or not. By default, the principal point is chosen to
best fit a subset of the source image (determined by alpha) to the corrected image.
The function computes and returns the optimal new camera matrix based on the free scaling parameter. By varying this parameter, you may retrieve only sensible pixels alpha=0 , keep all the original image pixels if there is valuable information in the corners alpha=1 , or get something in between. When alpha\>0 , the undistorted result is likely to have some black pixels corresponding to "virtual" pixels outside of the captured distorted image. The original camera matrix, distortion coefficients, the computed new camera matrix, and newImageSize should be passed to initUndistortRectifyMap to produce the maps for remap .
@Namespace(value="cv") @ByVal public static opencv_core.Mat getOptimalNewCameraMatrix(@ByVal opencv_core.Mat cameraMatrix, @ByVal opencv_core.Mat distCoeffs, @ByVal opencv_core.Size imageSize, double alpha)
@Namespace(value="cv") @ByVal public static opencv_core.Mat getOptimalNewCameraMatrix(@ByVal opencv_core.UMat cameraMatrix, @ByVal opencv_core.UMat distCoeffs, @ByVal opencv_core.Size imageSize, double alpha, @ByVal(nullValue="cv::Size()") opencv_core.Size newImgSize, opencv_core.Rect validPixROI, @Cast(value="bool") boolean centerPrincipalPoint)
@Namespace(value="cv") @ByVal public static opencv_core.Mat getOptimalNewCameraMatrix(@ByVal opencv_core.UMat cameraMatrix, @ByVal opencv_core.UMat distCoeffs, @ByVal opencv_core.Size imageSize, double alpha)
@Namespace(value="cv") @ByVal public static opencv_core.Mat getOptimalNewCameraMatrix(@ByVal opencv_core.GpuMat cameraMatrix, @ByVal opencv_core.GpuMat distCoeffs, @ByVal opencv_core.Size imageSize, double alpha, @ByVal(nullValue="cv::Size()") opencv_core.Size newImgSize, opencv_core.Rect validPixROI, @Cast(value="bool") boolean centerPrincipalPoint)
@Namespace(value="cv") @ByVal public static opencv_core.Mat getOptimalNewCameraMatrix(@ByVal opencv_core.GpuMat cameraMatrix, @ByVal opencv_core.GpuMat distCoeffs, @ByVal opencv_core.Size imageSize, double alpha)
@Namespace(value="cv")
public static void convertPointsToHomogeneous(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
src - Input vector of N-dimensional points.
dst - Output vector of N+1-dimensional points.
The function converts points from Euclidean to homogeneous space by appending 1's to the tuple of point coordinates. That is, each point (x1, x2, ..., xn) is converted to (x1, x2, ..., xn, 1).
@Namespace(value="cv")
public static void convertPointsToHomogeneous(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv")
public static void convertPointsToHomogeneous(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv")
public static void convertPointsFromHomogeneous(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
src - Input vector of N-dimensional points.
dst - Output vector of N-1-dimensional points.
The function converts points homogeneous to Euclidean space using perspective projection. That is, each point (x1, x2, ... x(n-1), xn) is converted to (x1/xn, x2/xn, ..., x(n-1)/xn). When xn=0, the output point coordinates will be (0,0,0,...).
@Namespace(value="cv")
public static void convertPointsFromHomogeneous(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv")
public static void convertPointsFromHomogeneous(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv")
public static void convertPointsHomogeneous(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
src - Input array or vector of 2D, 3D, or 4D points.
dst - Output vector of 2D, 3D, or 4D points.
The function converts 2D or 3D points from/to homogeneous coordinates by calling either convertPointsToHomogeneous or convertPointsFromHomogeneous.
\note The function is obsolete. Use one of the previous two functions instead.
@Namespace(value="cv")
public static void convertPointsHomogeneous(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv")
public static void convertPointsHomogeneous(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2, int method, double ransacReprojThreshold, double confidence, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat mask)
points1 - Array of N points from the first image. The point coordinates should be
floating-point (single or double precision).
points2 - Array of the second image points of the same size and format as points1 .
method - Method for computing a fundamental matrix.
- **CV_FM_7POINT** for a 7-point algorithm. \f$N = 7\f$
- **CV_FM_8POINT** for an 8-point algorithm. \f$N \ge 8\f$
- **CV_FM_RANSAC** for the RANSAC algorithm. \f$N \ge 8\f$
- **CV_FM_LMEDS** for the LMedS algorithm. \f$N \ge 8\f$
ransacReprojThreshold - Parameter used only for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.
confidence - Parameter used for the RANSAC and LMedS methods only. It specifies a desirable level
of confidence (probability) that the estimated matrix is correct.
mask - Optional output mask. (Description lost in extraction; see the OpenCV findFundamentalMat reference.)
The epipolar geometry is described by the following equation:
\f[[p_2; 1]^T F [p_1; 1] = 0\f]
where \f$F\f$ is a fundamental matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the second images, respectively.
The function calculates the fundamental matrix using one of four methods listed above and returns the found fundamental matrix. Normally just one matrix is found. But in case of the 7-point algorithm, the function may return up to 3 solutions ( \f$9 \times 3\f$ matrix that stores all 3 matrices sequentially).
The calculated fundamental matrix may be passed further to computeCorrespondEpilines that finds the epipolar lines corresponding to the specified points. It can also be passed to stereoRectifyUncalibrated to compute the rectification transformation. :
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
Mat fundamental_matrix =
findFundamentalMat(points1, points2, FM_RANSAC, 3, 0.99);
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2, int method, double ransacReprojThreshold, double confidence, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2, int method, double ransacReprojThreshold, double confidence, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2, @ByVal opencv_core.Mat mask, int method, double ransacReprojThreshold, double confidence)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2, @ByVal opencv_core.Mat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2, @ByVal opencv_core.UMat mask, int method, double ransacReprojThreshold, double confidence)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2, @ByVal opencv_core.UMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2, @ByVal opencv_core.GpuMat mask, int method, double ransacReprojThreshold, double confidence)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findFundamentalMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2, @ByVal opencv_core.GpuMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2, @ByVal opencv_core.Mat cameraMatrix, int method, double prob, double threshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat mask)
points1 - Array of N (N \>= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).points2 - Array of the second image points of the same size and format as points1 .cameraMatrix - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.method - Method for computing an essential matrix.
- **RANSAC** for the RANSAC algorithm.
- **LMEDS** for the LMedS algorithm.prob - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
confidence (probability) that the estimated matrix is correct.threshold - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.mask - Output array of N elements, every element of which is set to 0 for outliers and to 1
for the other points. The array is computed only in the RANSAC and LMedS methods.
This function estimates the essential matrix based on the five-point algorithm solver in \cite Nister03 . \cite SteweniusCFS is also a related work. The epipolar geometry is described by the following equation:
\f[[p_2; 1]^T K^{-T} E K^{-1} [p_1; 1] = 0\f]
where \f$E\f$ is an essential matrix, \f$p_1\f$ and \f$p_2\f$ are corresponding points in the first and the second images, respectively. The result of this function may be passed further to decomposeEssentialMat or recoverPose to recover the relative pose between cameras.
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2, @ByVal opencv_core.Mat cameraMatrix)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2, @ByVal opencv_core.UMat cameraMatrix, int method, double prob, double threshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2, @ByVal opencv_core.UMat cameraMatrix)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2, @ByVal opencv_core.GpuMat cameraMatrix, int method, double prob, double threshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2, @ByVal opencv_core.GpuMat cameraMatrix)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2, double focal, @ByVal(nullValue="cv::Point2d(0, 0)") opencv_core.Point2d pp, int method, double prob, double threshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat mask)
points1 - Array of N (N \>= 5) 2D points from the first image. The point coordinates should
be floating-point (single or double precision).points2 - Array of the second image points of the same size and format as points1 .focal - focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with same focal length and principal point.pp - principal point of the camera.method - Method for computing a fundamental matrix.
- **RANSAC** for the RANSAC algorithm.
- **LMEDS** for the LMedS algorithm.threshold - Parameter used for RANSAC. It is the maximum distance from a point to an epipolar
line in pixels, beyond which the point is considered an outlier and is not used for computing the
final fundamental matrix. It can be set to something like 1-3, depending on the accuracy of the
point localization, image resolution, and the image noise.prob - Parameter used for the RANSAC or LMedS methods only. It specifies a desirable level of
confidence (probability) that the estimated matrix is correct.mask - Output array of N elements, every element of which is set to 0 for outliers and to 1
for the other points. The array is computed only in the RANSAC and LMedS methods.
This function differs from the one above in that it computes the camera matrix from the focal length and principal point:
\f[K = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}\f]
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.Mat points1, @ByVal opencv_core.Mat points2)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2, double focal, @ByVal(nullValue="cv::Point2d(0, 0)") opencv_core.Point2d pp, int method, double prob, double threshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.UMat points1, @ByVal opencv_core.UMat points2)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2, double focal, @ByVal(nullValue="cv::Point2d(0, 0)") opencv_core.Point2d pp, int method, double prob, double threshold, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat mask)
@Namespace(value="cv") @ByVal public static opencv_core.Mat findEssentialMat(@ByVal opencv_core.GpuMat points1, @ByVal opencv_core.GpuMat points2)
@Namespace(value="cv")
public static void decomposeEssentialMat(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat t)
E - The input essential matrix.R1 - One possible rotation matrix.R2 - Another possible rotation matrix.t - One possible translation.
This function decomposes an essential matrix E using SVD decomposition \cite HartleyZ00 . Generally, 4 possible poses exist for a given E. They are \f$[R_1, t]\f$, \f$[R_1, -t]\f$, \f$[R_2, t]\f$, \f$[R_2, -t]\f$. By decomposing E, you can only get the direction of the translation, so the function returns unit t.
@Namespace(value="cv")
public static void decomposeEssentialMat(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat t)
@Namespace(value="cv")
public static void decomposeEssentialMat(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat t,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.Mat mask)
E - The input essential matrix.points1 - Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).points2 - Array of the second image points of the same size and format as points1 .cameraMatrix - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.R - Recovered relative rotation.t - Recovered relative translation.mask - Input/output mask for inliers in points1 and points2.
: If it is not empty, then it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover pose. In the output mask, only inliers
which pass the cheirality check are kept.
This function decomposes an essential matrix using decomposeEssentialMat and then verifies possible
pose hypotheses by doing cheirality check. The cheirality check basically means that the
triangulated 3D points should have positive depth. Some details can be found in \cite Nister03 .
This function can be used to process output E and mask from findEssentialMat. In this scenario, points1 and points2 are the same input for findEssentialMat. :
// Example. Estimation of fundamental matrix using the RANSAC algorithm
int point_count = 100;
vector<Point2f> points1(point_count);
vector<Point2f> points2(point_count);
// initialize the points here ...
for( int i = 0; i < point_count; i++ )
{
points1[i] = ...;
points2[i] = ...;
}
// camera matrix with both focal lengths = 1, and principal point = (0, 0)
Mat cameraMatrix = Mat::eye(3, 3, CV_64F);
Mat E, R, t, mask;
E = findEssentialMat(points1, points2, cameraMatrix, RANSAC, 0.999, 1.0, mask);
recoverPose(E, points1, points2, cameraMatrix, R, t, mask);
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat t,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.UMat mask)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat t,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.GpuMat mask)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat t,
double focal,
@ByVal(nullValue="cv::Point2d(0, 0)")
opencv_core.Point2d pp,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.Mat mask)
E - The input essential matrix.points1 - Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).points2 - Array of the second image points of the same size and format as points1 .R - Recovered relative rotation.t - Recovered relative translation.focal - Focal length of the camera. Note that this function assumes that points1 and points2
are feature points from cameras with same focal length and principal point.pp - principal point of the camera.mask - Input/output mask for inliers in points1 and points2.
: If it is not empty, then it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover pose. In the output mask, only inliers
which pass the cheirality check are kept.
This function differs from the one above in that it computes the camera matrix from the focal length and principal point:
\f[K = \begin{bmatrix} f & 0 & x_{pp} \\ 0 & f & y_{pp} \\ 0 & 0 & 1 \end{bmatrix}\f]
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat t,
double focal,
@ByVal(nullValue="cv::Point2d(0, 0)")
opencv_core.Point2d pp,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.UMat mask)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat t,
double focal,
@ByVal(nullValue="cv::Point2d(0, 0)")
opencv_core.Point2d pp,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.GpuMat mask)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat t)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat t,
double distanceThresh,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.Mat mask,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat triangulatedPoints)
E - The input essential matrix.points1 - Array of N 2D points from the first image. The point coordinates should be
floating-point (single or double precision).points2 - Array of the second image points of the same size and format as points1.cameraMatrix - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ .
Note that this function assumes that points1 and points2 are feature points from cameras with the
same camera matrix.R - Recovered relative rotation.t - Recovered relative translation.distanceThresh - threshold distance which is used to filter out far away points (i.e. infinite points).mask - Input/output mask for inliers in points1 and points2.
: If it is not empty, then it marks inliers in points1 and points2 for the given essential
matrix E. Only these inliers will be used to recover pose. In the output mask, only inliers
which pass the cheirality check are kept.triangulatedPoints - 3d points which were reconstructed by triangulation.@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.Mat E,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat cameraMatrix,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat t,
double distanceThresh)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat t,
double distanceThresh,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.UMat mask,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat triangulatedPoints)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.UMat E,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat cameraMatrix,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat t,
double distanceThresh)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat t,
double distanceThresh,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.GpuMat mask,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat triangulatedPoints)
@Namespace(value="cv")
public static int recoverPose(@ByVal
opencv_core.GpuMat E,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat cameraMatrix,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat t,
double distanceThresh)
@Namespace(value="cv")
public static void computeCorrespondEpilines(@ByVal
opencv_core.Mat points,
int whichImage,
@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat lines)
points - Input points. \f$N \times 1\f$ or \f$1 \times N\f$ matrix of type CV_32FC2 or
vector<Point2f>.whichImage - Index of the image (1 or 2) that contains the points .F - Fundamental matrix that can be estimated using findFundamentalMat or stereoRectify .lines - Output vector of the epipolar lines corresponding to the points in the other image.
Each line \f$ax + by + c=0\f$ is encoded by 3 numbers \f$(a, b, c)\f$ .
For every point in one of the two images of a stereo pair, the function finds the equation of the corresponding epipolar line in the other image.
From the fundamental matrix definition (see findFundamentalMat ), line \f$l^{(2)}_i\f$ in the second image for the point \f$p^{(1)}_i\f$ in the first image (when whichImage=1 ) is computed as:
\f[l^{(2)}_i = F p^{(1)}_i\f]
And vice versa, when whichImage=2, \f$l^{(1)}_i\f$ is computed from \f$p^{(2)}_i\f$ as:
\f[l^{(1)}_i = F^T p^{(2)}_i\f]
Line coefficients are defined up to a scale. They are normalized so that \f$a_i^2+b_i^2=1\f$ .
@Namespace(value="cv")
public static void computeCorrespondEpilines(@ByVal
opencv_core.UMat points,
int whichImage,
@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat lines)
@Namespace(value="cv")
public static void computeCorrespondEpilines(@ByVal
opencv_core.GpuMat points,
int whichImage,
@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat lines)
@Namespace(value="cv")
public static void triangulatePoints(@ByVal
opencv_core.Mat projMatr1,
@ByVal
opencv_core.Mat projMatr2,
@ByVal
opencv_core.Mat projPoints1,
@ByVal
opencv_core.Mat projPoints2,
@ByVal
opencv_core.Mat points4D)
projMatr1 - 3x4 projection matrix of the first camera.projMatr2 - 3x4 projection matrix of the second camera.projPoints1 - 2xN array of feature points in the first image. In case of the C++ version, it can
be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.projPoints2 - 2xN array of corresponding points in the second image. In case of the C++ version,
it can be also a vector of feature points or a two-channel matrix of size 1xN or Nx1.points4D - 4xN array of reconstructed points in homogeneous coordinates.
The function reconstructs 3-dimensional points (in homogeneous coordinates) by using their observations with a stereo camera. Projections matrices can be obtained from stereoRectify.
\note Keep in mind that all input data should be of float type in order for this function to work.
\sa reprojectImageTo3D
@Namespace(value="cv")
public static void triangulatePoints(@ByVal
opencv_core.UMat projMatr1,
@ByVal
opencv_core.UMat projMatr2,
@ByVal
opencv_core.UMat projPoints1,
@ByVal
opencv_core.UMat projPoints2,
@ByVal
opencv_core.UMat points4D)
@Namespace(value="cv")
public static void triangulatePoints(@ByVal
opencv_core.GpuMat projMatr1,
@ByVal
opencv_core.GpuMat projMatr2,
@ByVal
opencv_core.GpuMat projPoints1,
@ByVal
opencv_core.GpuMat projPoints2,
@ByVal
opencv_core.GpuMat points4D)
@Namespace(value="cv")
public static void correctMatches(@ByVal
opencv_core.Mat F,
@ByVal
opencv_core.Mat points1,
@ByVal
opencv_core.Mat points2,
@ByVal
opencv_core.Mat newPoints1,
@ByVal
opencv_core.Mat newPoints2)
F - 3x3 fundamental matrix.points1 - 1xN array containing the first set of points.points2 - 1xN array containing the second set of points.newPoints1 - The optimized points1.newPoints2 - The optimized points2.
The function implements the Optimal Triangulation Method (see Multiple View Geometry for details). For each given point correspondence points1[i] \<-\> points2[i], and a fundamental matrix F, it computes the corrected correspondences newPoints1[i] \<-\> newPoints2[i] that minimize the geometric error \f$d(points1[i], newPoints1[i])^2 + d(points2[i],newPoints2[i])^2\f$ (where \f$d(a,b)\f$ is the geometric distance between points \f$a\f$ and \f$b\f$ ) subject to the epipolar constraint \f$newPoints2^T * F * newPoints1 = 0\f$ .
@Namespace(value="cv")
public static void correctMatches(@ByVal
opencv_core.UMat F,
@ByVal
opencv_core.UMat points1,
@ByVal
opencv_core.UMat points2,
@ByVal
opencv_core.UMat newPoints1,
@ByVal
opencv_core.UMat newPoints2)
@Namespace(value="cv")
public static void correctMatches(@ByVal
opencv_core.GpuMat F,
@ByVal
opencv_core.GpuMat points1,
@ByVal
opencv_core.GpuMat points2,
@ByVal
opencv_core.GpuMat newPoints1,
@ByVal
opencv_core.GpuMat newPoints2)
@Namespace(value="cv")
public static void filterSpeckles(@ByVal
opencv_core.Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.Mat buf)
img - The input 16-bit signed disparity imagenewVal - The disparity value used to paint-off the specklesmaxSpeckleSize - The maximum speckle size to consider it a speckle. Larger blobs are not
affected by the algorithmmaxDiff - Maximum difference between neighbor disparity pixels to put them into the same
blob. Note that since StereoBM, StereoSGBM, and possibly other algorithms return a fixed-point
disparity map, where disparity values are multiplied by 16, this scale factor should be taken into
account when specifying this parameter value.buf - The optional temporary buffer to avoid memory allocation within the function.@Namespace(value="cv")
public static void filterSpeckles(@ByVal
opencv_core.Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff)
@Namespace(value="cv")
public static void filterSpeckles(@ByVal
opencv_core.UMat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.UMat buf)
@Namespace(value="cv")
public static void filterSpeckles(@ByVal
opencv_core.UMat img,
double newVal,
int maxSpeckleSize,
double maxDiff)
@Namespace(value="cv")
public static void filterSpeckles(@ByVal
opencv_core.GpuMat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
@ByVal(nullValue="cv::InputOutputArray(cv::noArray())")
opencv_core.GpuMat buf)
@Namespace(value="cv")
public static void filterSpeckles(@ByVal
opencv_core.GpuMat img,
double newVal,
int maxSpeckleSize,
double maxDiff)
@Namespace(value="cv") @ByVal public static opencv_core.Rect getValidDisparityROI(@ByVal opencv_core.Rect roi1, @ByVal opencv_core.Rect roi2, int minDisparity, int numberOfDisparities, int SADWindowSize)
@Namespace(value="cv")
public static void validateDisparity(@ByVal
opencv_core.Mat disparity,
@ByVal
opencv_core.Mat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp)
@Namespace(value="cv")
public static void validateDisparity(@ByVal
opencv_core.Mat disparity,
@ByVal
opencv_core.Mat cost,
int minDisparity,
int numberOfDisparities)
@Namespace(value="cv")
public static void validateDisparity(@ByVal
opencv_core.UMat disparity,
@ByVal
opencv_core.UMat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp)
@Namespace(value="cv")
public static void validateDisparity(@ByVal
opencv_core.UMat disparity,
@ByVal
opencv_core.UMat cost,
int minDisparity,
int numberOfDisparities)
@Namespace(value="cv")
public static void validateDisparity(@ByVal
opencv_core.GpuMat disparity,
@ByVal
opencv_core.GpuMat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp)
@Namespace(value="cv")
public static void validateDisparity(@ByVal
opencv_core.GpuMat disparity,
@ByVal
opencv_core.GpuMat cost,
int minDisparity,
int numberOfDisparities)
@Namespace(value="cv")
public static void reprojectImageTo3D(@ByVal
opencv_core.Mat disparity,
@ByVal
opencv_core.Mat _3dImage,
@ByVal
opencv_core.Mat Q,
@Cast(value="bool")
boolean handleMissingValues,
int ddepth)
disparity - Input single-channel 8-bit unsigned, 16-bit signed, 32-bit signed or 32-bit
floating-point disparity image. If 16-bit signed format is used, the values are assumed to have no
fractional bits._3dImage - Output 3-channel floating-point image of the same size as disparity . Each
element of _3dImage(x,y) contains 3D coordinates of the point (x,y) computed from the disparity
map.Q - \f$4 \times 4\f$ perspective transformation matrix that can be obtained with stereoRectify.handleMissingValues - Indicates, whether the function should handle missing values (i.e.
points where the disparity was not computed). If handleMissingValues=true, then pixels with the
minimal disparity that corresponds to the outliers (see StereoMatcher::compute ) are transformed
to 3D points with a very large Z value (currently set to 10000).ddepth - The optional output array depth. If it is -1, the output image will have CV_32F
depth. ddepth can also be set to CV_16S, CV_32S or CV_32F.
The function transforms a single-channel disparity map to a 3-channel image representing a 3D surface. That is, for each pixel (x,y) and the corresponding disparity d=disparity(x,y) , it computes:
\f[\begin{array}{l} [X \; Y \; Z \; W]^T = \texttt{Q} *[x \; y \; \texttt{disparity} (x,y) \; 1]^T \\ \texttt{\_3dImage} (x,y) = (X/W, \; Y/W, \; Z/W) \end{array}\f]
The matrix Q can be an arbitrary \f$4 \times 4\f$ matrix (for example, the one computed by stereoRectify). To reproject a sparse set of points {(x,y,d),...} to 3D space, use perspectiveTransform .
@Namespace(value="cv")
public static void reprojectImageTo3D(@ByVal
opencv_core.Mat disparity,
@ByVal
opencv_core.Mat _3dImage,
@ByVal
opencv_core.Mat Q)
@Namespace(value="cv")
public static void reprojectImageTo3D(@ByVal
opencv_core.UMat disparity,
@ByVal
opencv_core.UMat _3dImage,
@ByVal
opencv_core.UMat Q,
@Cast(value="bool")
boolean handleMissingValues,
int ddepth)
@Namespace(value="cv")
public static void reprojectImageTo3D(@ByVal
opencv_core.UMat disparity,
@ByVal
opencv_core.UMat _3dImage,
@ByVal
opencv_core.UMat Q)
@Namespace(value="cv")
public static void reprojectImageTo3D(@ByVal
opencv_core.GpuMat disparity,
@ByVal
opencv_core.GpuMat _3dImage,
@ByVal
opencv_core.GpuMat Q,
@Cast(value="bool")
boolean handleMissingValues,
int ddepth)
@Namespace(value="cv")
public static void reprojectImageTo3D(@ByVal
opencv_core.GpuMat disparity,
@ByVal
opencv_core.GpuMat _3dImage,
@ByVal
opencv_core.GpuMat Q)
@Namespace(value="cv")
public static double sampsonDistance(@ByVal
opencv_core.Mat pt1,
@ByVal
opencv_core.Mat pt2,
@ByVal
opencv_core.Mat F)
The function cv::sampsonDistance calculates and returns the first order approximation of the geometric error as: \f[ sd( \texttt{pt1} , \texttt{pt2} )= \frac{(\texttt{pt2}^t \cdot \texttt{F} \cdot \texttt{pt1})^2} {((\texttt{F} \cdot \texttt{pt1})(0))^2 + ((\texttt{F} \cdot \texttt{pt1})(1))^2 + ((\texttt{F}^t \cdot \texttt{pt2})(0))^2 + ((\texttt{F}^t \cdot \texttt{pt2})(1))^2} \f] The fundamental matrix may be calculated using the cv::findFundamentalMat function. See \cite HartleyZ00 11.4.3 for details.
pt1 - first homogeneous 2d pointpt2 - second homogeneous 2d pointF - fundamental matrix@Namespace(value="cv")
public static double sampsonDistance(@ByVal
opencv_core.UMat pt1,
@ByVal
opencv_core.UMat pt2,
@ByVal
opencv_core.UMat F)
@Namespace(value="cv")
public static double sampsonDistance(@ByVal
opencv_core.GpuMat pt1,
@ByVal
opencv_core.GpuMat pt2,
@ByVal
opencv_core.GpuMat F)
@Namespace(value="cv")
public static int estimateAffine3D(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
@ByVal
opencv_core.Mat out,
@ByVal
opencv_core.Mat inliers,
double ransacThreshold,
double confidence)
It computes \f[ \begin{bmatrix} x\\ y\\ z\\ \end{bmatrix} = \begin{bmatrix} a_{11} & a_{12} & a_{13}\\ a_{21} & a_{22} & a_{23}\\ a_{31} & a_{32} & a_{33}\\ \end{bmatrix} \begin{bmatrix} X\\ Y\\ Z\\ \end{bmatrix} + \begin{bmatrix} b_1\\ b_2\\ b_3\\ \end{bmatrix} \f]
src - First input 3D point set containing \f$(X,Y,Z)\f$.dst - Second input 3D point set containing \f$(x,y,z)\f$.out - Output 3D affine transformation matrix \f$3 \times 4\f$ of the form
\f[
\begin{bmatrix}
a_{11} & a_{12} & a_{13} & b_1\\
a_{21} & a_{22} & a_{23} & b_2\\
a_{31} & a_{32} & a_{33} & b_3\\
\end{bmatrix}
\f]inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).ransacThreshold - Maximum reprojection error in the RANSAC algorithm to consider a point as
an inlier.confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything
between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.
The function estimates an optimal 3D affine transformation between two 3D point sets using the RANSAC algorithm.
@Namespace(value="cv")
public static int estimateAffine3D(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
@ByVal
opencv_core.Mat out,
@ByVal
opencv_core.Mat inliers)
@Namespace(value="cv")
public static int estimateAffine3D(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
@ByVal
opencv_core.UMat out,
@ByVal
opencv_core.UMat inliers,
double ransacThreshold,
double confidence)
@Namespace(value="cv")
public static int estimateAffine3D(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
@ByVal
opencv_core.UMat out,
@ByVal
opencv_core.UMat inliers)
@Namespace(value="cv")
public static int estimateAffine3D(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
@ByVal
opencv_core.GpuMat out,
@ByVal
opencv_core.GpuMat inliers,
double ransacThreshold,
double confidence)
@Namespace(value="cv")
public static int estimateAffine3D(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
@ByVal
opencv_core.GpuMat out,
@ByVal
opencv_core.GpuMat inliers)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffine2D(@ByVal opencv_core.Mat from, @ByVal opencv_core.Mat to, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat inliers, int method, double ransacReprojThreshold, @Cast(value="size_t") long maxIters, double confidence, @Cast(value="size_t") long refineIters)
It computes \f[ \begin{bmatrix} x\\ y\\ \end{bmatrix} = \begin{bmatrix} a_{11} & a_{12}\\ a_{21} & a_{22}\\ \end{bmatrix} \begin{bmatrix} X\\ Y\\ \end{bmatrix} + \begin{bmatrix} b_1\\ b_2\\ \end{bmatrix} \f]
from - First input 2D point set containing \f$(X,Y)\f$.to - Second input 2D point set containing \f$(x,y)\f$.inliers - Output vector indicating which points are inliers (1-inlier, 0-outlier).method - Robust method used to compute transformation. The following methods are possible:
- cv::RANSAC - RANSAC-based robust method
- cv::LMEDS - Least-Median robust method
RANSAC is the default method.ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider
a point as an inlier. Applies only to RANSAC.maxIters - The maximum number of robust method iterations.confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything
between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.refineIters - Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
Passing 0 will disable refining, so the output matrix will be output of robust method.
The function estimates an optimal 2D affine transformation between two 2D point sets using the selected robust algorithm.
The computed transformation is then refined further (using only inliers) with the Levenberg-Marquardt method to reduce the re-projection error even more.
\note The RANSAC method can handle practically any ratio of outliers but needs a threshold to distinguish inliers from outliers. The method LMeDS does not need any threshold but it works correctly only when there are more than 50% of inliers.
\sa estimateAffinePartial2D, getAffineTransform
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffine2D(@ByVal opencv_core.Mat from, @ByVal opencv_core.Mat to)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffine2D(@ByVal opencv_core.UMat from, @ByVal opencv_core.UMat to, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat inliers, int method, double ransacReprojThreshold, @Cast(value="size_t") long maxIters, double confidence, @Cast(value="size_t") long refineIters)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffine2D(@ByVal opencv_core.UMat from, @ByVal opencv_core.UMat to)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffine2D(@ByVal opencv_core.GpuMat from, @ByVal opencv_core.GpuMat to, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat inliers, int method, double ransacReprojThreshold, @Cast(value="size_t") long maxIters, double confidence, @Cast(value="size_t") long refineIters)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffine2D(@ByVal opencv_core.GpuMat from, @ByVal opencv_core.GpuMat to)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffinePartial2D(@ByVal opencv_core.Mat from, @ByVal opencv_core.Mat to, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.Mat inliers, int method, double ransacReprojThreshold, @Cast(value="size_t") long maxIters, double confidence, @Cast(value="size_t") long refineIters)
from - First input 2D point set.to - Second input 2D point set.inliers - Output vector indicating which points are inliers.method - Robust method used to compute transformation. The following methods are possible:
- cv::RANSAC - RANSAC-based robust method
- cv::LMEDS - Least-Median robust method
RANSAC is the default method.ransacReprojThreshold - Maximum reprojection error in the RANSAC algorithm to consider
a point as an inlier. Applies only to RANSAC.maxIters - The maximum number of robust method iterations.confidence - Confidence level, between 0 and 1, for the estimated transformation. Anything
between 0.95 and 0.99 is usually good enough. Values too close to 1 can slow down the estimation
significantly. Values lower than 0.8-0.9 can result in an incorrectly estimated transformation.refineIters - Maximum number of iterations of refining algorithm (Levenberg-Marquardt).
Passing 0 will disable refining, so the output matrix will be output of robust method.
The function estimates an optimal 2D affine transformation with 4 degrees of freedom limited to combinations of translation, rotation, and uniform scaling. Uses the selected algorithm for robust estimation.
The computed transformation is then refined further (using only inliers) with the Levenberg-Marquardt method to reduce the re-projection error even more.
Estimated transformation matrix is: \f[ \begin{bmatrix} \cos(\theta) \cdot s & -\sin(\theta) \cdot s & t_x \\ \sin(\theta) \cdot s & \cos(\theta) \cdot s & t_y \end{bmatrix} \f] Where \f$ \theta \f$ is the rotation angle, \f$ s \f$ the scaling factor and \f$ t_x, t_y \f$ are translations in \f$ x, y \f$ axes respectively.
\note The RANSAC method can handle practically any ratio of outliers but needs a threshold to distinguish inliers from outliers. The method LMeDS does not need any threshold but it works correctly only when there are more than 50% of inliers.
\sa estimateAffine2D, getAffineTransform
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffinePartial2D(@ByVal opencv_core.Mat from, @ByVal opencv_core.Mat to)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffinePartial2D(@ByVal opencv_core.UMat from, @ByVal opencv_core.UMat to, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.UMat inliers, int method, double ransacReprojThreshold, @Cast(value="size_t") long maxIters, double confidence, @Cast(value="size_t") long refineIters)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffinePartial2D(@ByVal opencv_core.UMat from, @ByVal opencv_core.UMat to)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffinePartial2D(@ByVal opencv_core.GpuMat from, @ByVal opencv_core.GpuMat to, @ByVal(nullValue="cv::OutputArray(cv::noArray())") opencv_core.GpuMat inliers, int method, double ransacReprojThreshold, @Cast(value="size_t") long maxIters, double confidence, @Cast(value="size_t") long refineIters)
@Namespace(value="cv") @ByVal public static opencv_core.Mat estimateAffinePartial2D(@ByVal opencv_core.GpuMat from, @ByVal opencv_core.GpuMat to)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.Mat H,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector translations,
@ByVal
opencv_core.MatVector normals)
H - The input homography matrix between two images.K - The input intrinsic camera calibration matrix.rotations - Array of rotation matrices.translations - Array of translation matrices.normals - Array of plane normal matrices.
This function extracts relative camera motion between two views observing a planar object from the homography H induced by the plane. The intrinsic camera matrix K must also be provided. The function may return up to four mathematical solution sets. At least two of the solutions may further be invalidated if point correspondences are available by applying positive depth constraint (all points must be in front of the camera). The decomposition method is described in detail in \cite Malis .
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.Mat H,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector translations,
@ByVal
opencv_core.UMatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.Mat H,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector translations,
@ByVal
opencv_core.GpuMatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.UMat H,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector translations,
@ByVal
opencv_core.MatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.UMat H,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector translations,
@ByVal
opencv_core.UMatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.UMat H,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector translations,
@ByVal
opencv_core.GpuMatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.GpuMat H,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector translations,
@ByVal
opencv_core.MatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.GpuMat H,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector translations,
@ByVal
opencv_core.UMatVector normals)
@Namespace(value="cv")
public static int decomposeHomographyMat(@ByVal
opencv_core.GpuMat H,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector translations,
@ByVal
opencv_core.GpuMatVector normals)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector normals,
@ByVal
opencv_core.Mat beforePoints,
@ByVal
opencv_core.Mat afterPoints,
@ByVal
opencv_core.Mat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.Mat pointsMask)
rotations - Vector of rotation matrices.normals - Vector of plane normal matrices.beforePoints - Vector of (rectified) visible reference points before the homography is appliedafterPoints - Vector of (rectified) visible reference points after the homography is appliedpossibleSolutions - Vector of int indices representing the viable solution set after filteringpointsMask - optional Mat/Vector of 8u type representing the mask for the inliers as given by the findHomography function
This function is intended to filter the output of the decomposeHomographyMat based on additional information as described in \cite Malis . The summary of the method: the decomposeHomographyMat function returns 2 unique solutions and their "opposites" for a total of 4 solutions. If we have access to the sets of points visible in the camera frame before and after the homography transformation is applied, we can determine which are the true potential solutions and which are the opposites by verifying which homographies are consistent with all visible reference points being in front of the camera. The inputs are left unchanged; the filtered solution set is returned as indices into the existing one.
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector normals,
@ByVal
opencv_core.Mat beforePoints,
@ByVal
opencv_core.Mat afterPoints,
@ByVal
opencv_core.Mat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector normals,
@ByVal
opencv_core.Mat beforePoints,
@ByVal
opencv_core.Mat afterPoints,
@ByVal
opencv_core.Mat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.Mat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector normals,
@ByVal
opencv_core.Mat beforePoints,
@ByVal
opencv_core.Mat afterPoints,
@ByVal
opencv_core.Mat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector normals,
@ByVal
opencv_core.Mat beforePoints,
@ByVal
opencv_core.Mat afterPoints,
@ByVal
opencv_core.Mat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.Mat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector normals,
@ByVal
opencv_core.Mat beforePoints,
@ByVal
opencv_core.Mat afterPoints,
@ByVal
opencv_core.Mat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector normals,
@ByVal
opencv_core.UMat beforePoints,
@ByVal
opencv_core.UMat afterPoints,
@ByVal
opencv_core.UMat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.UMat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector normals,
@ByVal
opencv_core.UMat beforePoints,
@ByVal
opencv_core.UMat afterPoints,
@ByVal
opencv_core.UMat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector normals,
@ByVal
opencv_core.UMat beforePoints,
@ByVal
opencv_core.UMat afterPoints,
@ByVal
opencv_core.UMat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.UMat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector normals,
@ByVal
opencv_core.UMat beforePoints,
@ByVal
opencv_core.UMat afterPoints,
@ByVal
opencv_core.UMat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector normals,
@ByVal
opencv_core.UMat beforePoints,
@ByVal
opencv_core.UMat afterPoints,
@ByVal
opencv_core.UMat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.UMat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector normals,
@ByVal
opencv_core.UMat beforePoints,
@ByVal
opencv_core.UMat afterPoints,
@ByVal
opencv_core.UMat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector normals,
@ByVal
opencv_core.GpuMat beforePoints,
@ByVal
opencv_core.GpuMat afterPoints,
@ByVal
opencv_core.GpuMat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.GpuMat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.MatVector rotations,
@ByVal
opencv_core.MatVector normals,
@ByVal
opencv_core.GpuMat beforePoints,
@ByVal
opencv_core.GpuMat afterPoints,
@ByVal
opencv_core.GpuMat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector normals,
@ByVal
opencv_core.GpuMat beforePoints,
@ByVal
opencv_core.GpuMat afterPoints,
@ByVal
opencv_core.GpuMat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.GpuMat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.UMatVector rotations,
@ByVal
opencv_core.UMatVector normals,
@ByVal
opencv_core.GpuMat beforePoints,
@ByVal
opencv_core.GpuMat afterPoints,
@ByVal
opencv_core.GpuMat possibleSolutions)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector normals,
@ByVal
opencv_core.GpuMat beforePoints,
@ByVal
opencv_core.GpuMat afterPoints,
@ByVal
opencv_core.GpuMat possibleSolutions,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.GpuMat pointsMask)
@Namespace(value="cv")
public static void filterHomographyDecompByVisibleRefpoints(@ByVal
opencv_core.GpuMatVector rotations,
@ByVal
opencv_core.GpuMatVector normals,
@ByVal
opencv_core.GpuMat beforePoints,
@ByVal
opencv_core.GpuMat afterPoints,
@ByVal
opencv_core.GpuMat possibleSolutions)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@Const @ByRef
opencv_core.Mat affine,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
double alpha,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat jacobian)
objectPoints - Array of object points, 1xN/Nx1 3-channel (or vector&lt;Point3f&gt;).imagePoints - Output array of image points, 2xN/Nx2 1-channel or 1xN/Nx1 2-channel, or
vector&lt;Point2f&gt;.affine - K - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.D - Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.alpha - The skew coefficient.jacobian - Optional output 2Nx15 jacobian matrix of derivatives of image points with respect
to components of the focal lengths, coordinates of the principal point, distortion coefficients,
rotation vector, translation vector, and the skew. In the old interface different components of
the jacobian are returned via different output parameters.
The function computes projections of 3D points to the image plane given intrinsic and extrinsic camera parameters. Optionally, the function computes Jacobians - matrices of partial derivatives of image points coordinates (as functions of all the input parameters) with respect to the particular parameters, intrinsic and/or extrinsic.
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@Const @ByRef
opencv_core.Mat affine,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@Const @ByRef
opencv_core.Mat affine,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
double alpha,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat jacobian)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@Const @ByRef
opencv_core.Mat affine,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@Const @ByRef
opencv_core.Mat affine,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
double alpha,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat jacobian)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@Const @ByRef
opencv_core.Mat affine,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.Mat objectPoints,
@ByVal
opencv_core.Mat imagePoints,
@ByVal
opencv_core.Mat rvec,
@ByVal
opencv_core.Mat tvec,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
double alpha,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.Mat jacobian)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.UMat objectPoints,
@ByVal
opencv_core.UMat imagePoints,
@ByVal
opencv_core.UMat rvec,
@ByVal
opencv_core.UMat tvec,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
double alpha,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.UMat jacobian)
@Namespace(value="cv::fisheye")
public static void projectPoints(@ByVal
opencv_core.GpuMat objectPoints,
@ByVal
opencv_core.GpuMat imagePoints,
@ByVal
opencv_core.GpuMat rvec,
@ByVal
opencv_core.GpuMat tvec,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
double alpha,
@ByVal(nullValue="cv::OutputArray(cv::noArray())")
opencv_core.GpuMat jacobian)
@Namespace(value="cv::fisheye")
public static void distortPoints(@ByVal
opencv_core.Mat undistorted,
@ByVal
opencv_core.Mat distorted,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
double alpha)
undistorted - Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt;).K - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.D - Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.alpha - The skew coefficient.distorted - Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt;.Note that the function assumes the camera matrix of the undistorted points to be identity. This means if you want to transform back points undistorted with undistortPoints() you have to multiply them with \f$P^{-1}\f$.
@Namespace(value="cv::fisheye")
public static void distortPoints(@ByVal
opencv_core.Mat undistorted,
@ByVal
opencv_core.Mat distorted,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D)
@Namespace(value="cv::fisheye")
public static void distortPoints(@ByVal
opencv_core.UMat undistorted,
@ByVal
opencv_core.UMat distorted,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
double alpha)
@Namespace(value="cv::fisheye")
public static void distortPoints(@ByVal
opencv_core.UMat undistorted,
@ByVal
opencv_core.UMat distorted,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D)
@Namespace(value="cv::fisheye")
public static void distortPoints(@ByVal
opencv_core.GpuMat undistorted,
@ByVal
opencv_core.GpuMat distorted,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
double alpha)
@Namespace(value="cv::fisheye")
public static void distortPoints(@ByVal
opencv_core.GpuMat undistorted,
@ByVal
opencv_core.GpuMat distorted,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D)
@Namespace(value="cv::fisheye")
public static void undistortPoints(@ByVal
opencv_core.Mat distorted,
@ByVal
opencv_core.Mat undistorted,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.Mat R,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.Mat P)
distorted - Array of object points, 1xN/Nx1 2-channel (or vector&lt;Point2f&gt;).K - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.D - Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.R - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP - New camera matrix (3x3) or new projection matrix (3x4)undistorted - Output array of image points, 1xN/Nx1 2-channel, or vector&lt;Point2f&gt;.@Namespace(value="cv::fisheye")
public static void undistortPoints(@ByVal
opencv_core.Mat distorted,
@ByVal
opencv_core.Mat undistorted,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D)
@Namespace(value="cv::fisheye")
public static void undistortPoints(@ByVal
opencv_core.UMat distorted,
@ByVal
opencv_core.UMat undistorted,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.UMat R,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.UMat P)
@Namespace(value="cv::fisheye")
public static void undistortPoints(@ByVal
opencv_core.UMat distorted,
@ByVal
opencv_core.UMat undistorted,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D)
@Namespace(value="cv::fisheye")
public static void undistortPoints(@ByVal
opencv_core.GpuMat distorted,
@ByVal
opencv_core.GpuMat undistorted,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.GpuMat R,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.GpuMat P)
@Namespace(value="cv::fisheye")
public static void undistortPoints(@ByVal
opencv_core.GpuMat distorted,
@ByVal
opencv_core.GpuMat undistorted,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D)
@Namespace(value="cv::fisheye")
public static void initUndistortRectifyMap(@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat P,
@Const @ByRef
opencv_core.Size size,
int m1type,
@ByVal
opencv_core.Mat map1,
@ByVal
opencv_core.Mat map2)
K - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.D - Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.R - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP - New camera matrix (3x3) or new projection matrix (3x4)size - Undistorted image size.m1type - Type of the first output map that can be CV_32FC1 or CV_16SC2 . See convertMaps()
for details.map1 - The first output map.map2 - The second output map.@Namespace(value="cv::fisheye")
public static void initUndistortRectifyMap(@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat P,
@Const @ByRef
opencv_core.Size size,
int m1type,
@ByVal
opencv_core.UMat map1,
@ByVal
opencv_core.UMat map2)
@Namespace(value="cv::fisheye")
public static void initUndistortRectifyMap(@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat P,
@Const @ByRef
opencv_core.Size size,
int m1type,
@ByVal
opencv_core.GpuMat map1,
@ByVal
opencv_core.GpuMat map2)
@Namespace(value="cv::fisheye")
public static void undistortImage(@ByVal
opencv_core.Mat distorted,
@ByVal
opencv_core.Mat undistorted,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.Mat Knew,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size new_size)
distorted - image with fisheye lens distortion.undistorted - Output image with compensated fisheye lens distortion.K - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.D - Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.Knew - Camera matrix of the distorted image. By default, it is the identity matrix but you
may additionally scale and shift the result by using a different matrix.new_size - The new size of the output image.
The function transforms an image to compensate radial and tangential lens distortion.
The function is simply a combination of fisheye::initUndistortRectifyMap (with unity R ) and remap (with bilinear interpolation). See the former function for details of the transformation being performed.
See below the results of undistortImage. - a) result of undistort of perspective camera model (all possible coefficients (k_1, k_2, k_3, k_4, k_5, k_6) of distortion were optimized under calibration) - b) result of fisheye::undistortImage of fisheye camera model (all possible coefficients (k_1, k_2, k_3, k_4) of fisheye distortion were optimized under calibration) - c) original image was captured with fisheye lens
Pictures a) and b) are almost the same. But if we consider points of the image located far from the center of the image, we can notice that on image a) these points are distorted.

@Namespace(value="cv::fisheye")
public static void undistortImage(@ByVal
opencv_core.Mat distorted,
@ByVal
opencv_core.Mat undistorted,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D)
@Namespace(value="cv::fisheye")
public static void undistortImage(@ByVal
opencv_core.UMat distorted,
@ByVal
opencv_core.UMat undistorted,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.UMat Knew,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size new_size)
@Namespace(value="cv::fisheye")
public static void undistortImage(@ByVal
opencv_core.UMat distorted,
@ByVal
opencv_core.UMat undistorted,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D)
@Namespace(value="cv::fisheye")
public static void undistortImage(@ByVal
opencv_core.GpuMat distorted,
@ByVal
opencv_core.GpuMat undistorted,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal(nullValue="cv::InputArray(cv::noArray())")
opencv_core.GpuMat Knew,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size new_size)
@Namespace(value="cv::fisheye")
public static void undistortImage(@ByVal
opencv_core.GpuMat distorted,
@ByVal
opencv_core.GpuMat undistorted,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D)
@Namespace(value="cv::fisheye")
public static void estimateNewCameraMatrixForUndistortRectify(@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat P,
double balance,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size new_size,
double fov_scale)
K - Camera matrix \f$K = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$.image_size - Size of the image.D - Input vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.R - Rectification transformation in the object space: 3x3 1-channel, or vector: 3x1/1x3
1-channel or 1x1 3-channelP - New camera matrix (3x3) or new projection matrix (3x4)balance - Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].new_size - The new size of the output image.fov_scale - Divisor for new focal length.@Namespace(value="cv::fisheye")
public static void estimateNewCameraMatrixForUndistortRectify(@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat P)
@Namespace(value="cv::fisheye")
public static void estimateNewCameraMatrixForUndistortRectify(@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat P,
double balance,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size new_size,
double fov_scale)
@Namespace(value="cv::fisheye")
public static void estimateNewCameraMatrixForUndistortRectify(@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat P)
@Namespace(value="cv::fisheye")
public static void estimateNewCameraMatrixForUndistortRectify(@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat P,
double balance,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size new_size,
double fov_scale)
@Namespace(value="cv::fisheye")
public static void estimateNewCameraMatrixForUndistortRectify(@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat P)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
objectPoints - vector of vectors of calibration pattern points in the calibration pattern
coordinate space.imagePoints - vector of vectors of the projections of calibration pattern points.
imagePoints.size() and objectPoints.size() and imagePoints[i].size() must be equal to
objectPoints[i].size() for each i.image_size - Size of the image used only to initialize the intrinsic camera matrix.K - Output 3x3 floating-point camera matrix
\f$A = \vecthreethree{f_x}{0}{c_x}{0}{f_y}{c_y}{0}{0}{1}\f$ . If
fisheye::CALIB_USE_INTRINSIC_GUESS is specified, some or all of fx, fy, cx, cy must be
initialized before calling the function.D - Output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$.rvecs - Output vector of rotation vectors (see Rodrigues ) estimated for each pattern view.
That is, each k-th rotation vector together with the corresponding k-th translation vector (see
the next output parameter description) brings the calibration pattern from the model coordinate
space (in which object points are specified) to the world coordinate space, that is, a real
position of the calibration pattern in the k-th pattern view (k=0.. *M* -1).tvecs - Output vector of translation vectors estimated for each pattern view.flags - Different flags that may be zero or a combination of the following values:
- **fisheye::CALIB_USE_INTRINSIC_GUESS** cameraMatrix contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center ( imageSize is used), and focal distances are computed in a least-squares fashion.
- **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
of intrinsic optimization.
- **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
- **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stay zero.
- **fisheye::CALIB_FIX_K1..fisheye::CALIB_FIX_K4** Selected distortion coefficients
are set to zeros and stay zero.
- **fisheye::CALIB_FIX_PRINCIPAL_POINT** The principal point is not changed during the global
optimization. It stays at the center or at a different location specified when CALIB_USE_INTRINSIC_GUESS is set too.
criteria - Termination criteria for the iterative optimization algorithm.
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.Mat K,
@ByVal
opencv_core.Mat D,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.UMat K,
@ByVal
opencv_core.UMat D,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.MatVector rvecs,
@ByVal
opencv_core.MatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.UMatVector rvecs,
@ByVal
opencv_core.UMatVector tvecs)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double calibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints,
@Const @ByRef
opencv_core.Size image_size,
@ByVal
opencv_core.GpuMat K,
@ByVal
opencv_core.GpuMat D,
@ByVal
opencv_core.GpuMatVector rvecs,
@ByVal
opencv_core.GpuMatVector tvecs)
@Namespace(value="cv::fisheye")
public static void stereoRectify(@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@Const @ByRef
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat tvec,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat Q,
int flags,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size newImageSize,
double balance,
double fov_scale)
K1 - First camera matrix.D1 - First camera distortion parameters.K2 - Second camera matrix.D2 - Second camera distortion parameters.imageSize - Size of the image used for stereo calibration.R - Rotation matrix between the coordinate systems of the first and the second
cameras.tvec - Translation vector between coordinate systems of the cameras.R1 - Output 3x3 rectification transform (rotation matrix) for the first camera.R2 - Output 3x3 rectification transform (rotation matrix) for the second camera.P1 - Output 3x4 projection matrix in the new (rectified) coordinate systems for the first
camera.P2 - Output 3x4 projection matrix in the new (rectified) coordinate systems for the second
camera.Q - Output \f$4 \times 4\f$ disparity-to-depth mapping matrix (see reprojectImageTo3D ).flags - Operation flags that may be zero or CALIB_ZERO_DISPARITY . If the flag is set,
the function makes the principal points of each camera have the same pixel coordinates in the
rectified views. And if the flag is not set, the function may still shift the images in the
horizontal or vertical direction (depending on the orientation of epipolar lines) to maximize the
useful image area.newImageSize - New image resolution after rectification. The same size should be passed to
initUndistortRectifyMap (see the stereo_calib.cpp sample in OpenCV samples directory). When (0,0)
is passed (default), it is set to the original imageSize . Setting it to larger value can help you
preserve details in the original image, especially when there is a big radial distortion.
balance - Sets the new focal length in range between the min focal length and the max focal
length. Balance is in range of [0, 1].
fov_scale - Divisor for new focal length.
@Namespace(value="cv::fisheye")
public static void stereoRectify(@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@Const @ByRef
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat tvec,
@ByVal
opencv_core.Mat R1,
@ByVal
opencv_core.Mat R2,
@ByVal
opencv_core.Mat P1,
@ByVal
opencv_core.Mat P2,
@ByVal
opencv_core.Mat Q,
int flags)
@Namespace(value="cv::fisheye")
public static void stereoRectify(@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@Const @ByRef
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat tvec,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat Q,
int flags,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size newImageSize,
double balance,
double fov_scale)
@Namespace(value="cv::fisheye")
public static void stereoRectify(@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@Const @ByRef
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat tvec,
@ByVal
opencv_core.UMat R1,
@ByVal
opencv_core.UMat R2,
@ByVal
opencv_core.UMat P1,
@ByVal
opencv_core.UMat P2,
@ByVal
opencv_core.UMat Q,
int flags)
@Namespace(value="cv::fisheye")
public static void stereoRectify(@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@Const @ByRef
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat tvec,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat Q,
int flags,
@Const @ByRef(nullValue="cv::Size()")
opencv_core.Size newImageSize,
double balance,
double fov_scale)
@Namespace(value="cv::fisheye")
public static void stereoRectify(@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@Const @ByRef
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat tvec,
@ByVal
opencv_core.GpuMat R1,
@ByVal
opencv_core.GpuMat R2,
@ByVal
opencv_core.GpuMat P1,
@ByVal
opencv_core.GpuMat P2,
@ByVal
opencv_core.GpuMat Q,
int flags)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
objectPoints - Vector of vectors of the calibration pattern points.imagePoints1 - Vector of vectors of the projections of the calibration pattern points,
observed by the first camera.imagePoints2 - Vector of vectors of the projections of the calibration pattern points,
observed by the second camera.K1 - Input/output first camera matrix:
\f$\vecthreethree{f_x^{(j)}}{0}{c_x^{(j)}}{0}{f_y^{(j)}}{c_y^{(j)}}{0}{0}{1}\f$ , \f$j = 0,\, 1\f$ . If
any of fisheye::CALIB_USE_INTRINSIC_GUESS , fisheye::CALIB_FIX_INTRINSIC are specified,
some or all of the matrix components must be initialized.D1 - Input/output vector of distortion coefficients \f$(k_1, k_2, k_3, k_4)\f$ of 4 elements.K2 - Input/output second camera matrix. The parameter is similar to K1 .D2 - Input/output lens distortion coefficients for the second camera. The parameter is
similar to D1 .imageSize - Size of the image used only to initialize intrinsic camera matrix.R - Output rotation matrix between the 1st and the 2nd camera coordinate systems.T - Output translation vector between the coordinate systems of the cameras.flags - Different flags that may be zero or a combination of the following values:
- **fisheye::CALIB_FIX_INTRINSIC** Fix K1, K2? and D1, D2? so that only R, T matrices
are estimated.
- **fisheye::CALIB_USE_INTRINSIC_GUESS** K1, K2 contains valid initial values of
fx, fy, cx, cy that are optimized further. Otherwise, (cx, cy) is initially set to the image
center (imageSize is used), and focal distances are computed in a least-squares fashion.
- **fisheye::CALIB_RECOMPUTE_EXTRINSIC** Extrinsic will be recomputed after each iteration
of intrinsic optimization.
- **fisheye::CALIB_CHECK_COND** The functions will check validity of condition number.
- **fisheye::CALIB_FIX_SKEW** Skew coefficient (alpha) is set to zero and stay zero.
- **fisheye::CALIB_FIX_K1..4** Selected distortion coefficients are set to zeros and stay
zero.
criteria - Termination criteria for the iterative optimization algorithm.
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.Mat K1,
@ByVal
opencv_core.Mat D1,
@ByVal
opencv_core.Mat K2,
@ByVal
opencv_core.Mat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.Mat R,
@ByVal
opencv_core.Mat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.UMat K1,
@ByVal
opencv_core.UMat D1,
@ByVal
opencv_core.UMat K2,
@ByVal
opencv_core.UMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.UMat R,
@ByVal
opencv_core.UMat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.MatVector objectPoints,
@ByVal
opencv_core.MatVector imagePoints1,
@ByVal
opencv_core.MatVector imagePoints2,
@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.UMatVector objectPoints,
@ByVal
opencv_core.UMatVector imagePoints1,
@ByVal
opencv_core.UMatVector imagePoints2,
@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T,
int flags,
@ByVal(nullValue="cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 100, DBL_EPSILON)")
opencv_core.TermCriteria criteria)
@Namespace(value="cv::fisheye")
public static double stereoCalibrate(@ByVal
opencv_core.GpuMatVector objectPoints,
@ByVal
opencv_core.GpuMatVector imagePoints1,
@ByVal
opencv_core.GpuMatVector imagePoints2,
@ByVal
opencv_core.GpuMat K1,
@ByVal
opencv_core.GpuMat D1,
@ByVal
opencv_core.GpuMat K2,
@ByVal
opencv_core.GpuMat D2,
@ByVal
opencv_core.Size imageSize,
@ByVal
opencv_core.GpuMat R,
@ByVal
opencv_core.GpuMat T)
Copyright © 2018. All rights reserved.