int |
opencv_objdetect.DetectionBasedTracker.addObject(opencv_core.Rect location) |
opencv_core.GpuMat |
opencv_core.GpuMat.apply(opencv_core.Rect roi) |
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Rect roi)
This is an overloaded method, provided for convenience.
|
opencv_core.UMat |
opencv_core.UMat.apply(opencv_core.Rect roi) |
opencv_core.MatExpr |
opencv_core.MatExpr.apply(opencv_core.Rect roi) |
boolean |
opencv_objdetect.SimilarRects.apply(opencv_core.Rect r1,
opencv_core.Rect r2) |
void |
opencv_tracking.TrackerKCF.Arg0_Mat_Rect_Mat.call(opencv_core.Mat arg0,
opencv_core.Rect arg1,
opencv_core.Mat arg2) |
static opencv_core.RotatedRect |
opencv_video.CamShift(opencv_core.GpuMat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria) |
static opencv_core.RotatedRect |
opencv_video.CamShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
Finds an object center, size, and orientation.
|
static opencv_core.RotatedRect |
opencv_video.CamShift(opencv_core.UMat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria) |
float |
opencv_tracking.StrongClassifierDirectSelection.classifySmooth(opencv_core.MatVector images,
opencv_core.Rect sampleROI,
int[] idx) |
float |
opencv_tracking.StrongClassifierDirectSelection.classifySmooth(opencv_core.MatVector images,
opencv_core.Rect sampleROI,
IntBuffer idx) |
float |
opencv_tracking.StrongClassifierDirectSelection.classifySmooth(opencv_core.MatVector images,
opencv_core.Rect sampleROI,
org.bytedeco.javacpp.IntPointer idx) |
static boolean |
opencv_imgproc.clipLine(opencv_core.Rect imgRect,
opencv_core.Point pt1,
opencv_core.Point pt2)
This is an overloaded method, provided for convenience.
|
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.GpuMat image,
opencv_core.Rect roi,
opencv_core.GpuMat descriptors) |
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.Mat image,
opencv_core.Rect roi,
opencv_core.Mat descriptors)
This is an overloaded method, provided for convenience.
|
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.UMat image,
opencv_core.Rect roi,
opencv_core.UMat descriptors) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.GpuMat GT,
opencv_core.GpuMat src,
opencv_core.Rect ROI) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.GpuMat GT,
opencv_core.GpuMat src,
opencv_core.Rect ROI,
int thresh) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI,
int thresh)
Computes the percent of "bad" pixels in the disparity map
(pixels where the error is higher than a specified threshold).
|
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI,
int thresh) |
static double |
opencv_ximgproc.computeMSE(opencv_core.GpuMat GT,
opencv_core.GpuMat src,
opencv_core.Rect ROI) |
static double |
opencv_ximgproc.computeMSE(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI)
Computes the mean square error for disparity maps.
|
static double |
opencv_ximgproc.computeMSE(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI) |
opencv_face.FacemarkLBF.Params |
opencv_face.FacemarkLBF.Params.detectROI(opencv_core.Rect detectROI) |
boolean |
opencv_tracking.CvHaarEvaluator.FeatureHaar.eval(opencv_core.Mat image,
opencv_core.Rect ROI,
float[] result) |
boolean |
opencv_tracking.CvHaarEvaluator.FeatureHaar.eval(opencv_core.Mat image,
opencv_core.Rect ROI,
FloatBuffer result) |
boolean |
opencv_tracking.CvHaarEvaluator.FeatureHaar.eval(opencv_core.Mat image,
opencv_core.Rect ROI,
org.bytedeco.javacpp.FloatPointer result) |
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.GpuMat disparity_map_left,
opencv_core.GpuMat left_view,
opencv_core.GpuMat filtered_disparity_map,
opencv_core.GpuMat disparity_map_right,
opencv_core.Rect ROI,
opencv_core.GpuMat right_view) |
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.Mat disparity_map_left,
opencv_core.Mat left_view,
opencv_core.Mat filtered_disparity_map,
opencv_core.Mat disparity_map_right,
opencv_core.Rect ROI,
opencv_core.Mat right_view)
Applies filtering to the disparity map.
|
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.UMat disparity_map_left,
opencv_core.UMat left_view,
opencv_core.UMat filtered_disparity_map,
opencv_core.UMat disparity_map_right,
opencv_core.Rect ROI,
opencv_core.UMat right_view) |
static int |
opencv_imgproc.floodFill(opencv_core.GpuMat image,
opencv_core.GpuMat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags) |
static int |
opencv_imgproc.floodFill(opencv_core.GpuMat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
Fills a connected component with the given color.
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
This is an overloaded method, provided for convenience.
|
static int |
opencv_imgproc.floodFill(opencv_core.UMat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags) |
static int |
opencv_imgproc.floodFill(opencv_core.UMat image,
opencv_core.UMat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.GpuMat cameraMatrix,
opencv_core.GpuMat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint)
Returns the new camera matrix based on the free scaling parameter.
|
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint) |
static opencv_core.Rect |
opencv_calib3d.getValidDisparityROI(opencv_core.Rect roi1,
opencv_core.Rect roi2,
int minDisparity,
int numberOfDisparities,
int SADWindowSize)
Computes the valid disparity ROI from the valid ROIs of the rectified images (as returned by cv::stereoRectify()).
|
static void |
opencv_imgproc.grabCut(opencv_core.GpuMat img,
opencv_core.GpuMat mask,
opencv_core.Rect rect,
opencv_core.GpuMat bgdModel,
opencv_core.GpuMat fgdModel,
int iterCount) |
static void |
opencv_imgproc.grabCut(opencv_core.GpuMat img,
opencv_core.GpuMat mask,
opencv_core.Rect rect,
opencv_core.GpuMat bgdModel,
opencv_core.GpuMat fgdModel,
int iterCount,
int mode) |
static void |
opencv_imgproc.grabCut(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Rect rect,
opencv_core.Mat bgdModel,
opencv_core.Mat fgdModel,
int iterCount) |
static void |
opencv_imgproc.grabCut(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Rect rect,
opencv_core.Mat bgdModel,
opencv_core.Mat fgdModel,
int iterCount,
int mode)
Runs the GrabCut algorithm.
|
static void |
opencv_imgproc.grabCut(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Rect rect,
opencv_core.UMat bgdModel,
opencv_core.UMat fgdModel,
int iterCount) |
static void |
opencv_imgproc.grabCut(opencv_core.UMat img,
opencv_core.UMat mask,
opencv_core.Rect rect,
opencv_core.UMat bgdModel,
opencv_core.UMat fgdModel,
int iterCount,
int mode) |
void |
opencv_imgproc.Subdiv2D.initDelaunay(opencv_core.Rect rect)
Creates a new empty Delaunay subdivision.
|
opencv_core.RectVector.Iterator |
opencv_core.RectVector.insert(opencv_core.RectVector.Iterator pos,
opencv_core.Rect value) |
boolean |
opencv_core.Point.inside(opencv_core.Rect r)
Checks whether the point is inside the specified rectangle.
|
opencv_objdetect.DetectionBasedTracker.ExtObject |
opencv_objdetect.DetectionBasedTracker.ExtObject.location(opencv_core.Rect location) |
static int |
opencv_video.meanShift(opencv_core.GpuMat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria) |
static int |
opencv_video.meanShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
Finds an object on a back projection image.
|
static int |
opencv_video.meanShift(opencv_core.UMat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria) |
static boolean |
opencv_stitching.overlapRoi(opencv_core.Point tl1,
opencv_core.Point tl2,
opencv_core.Size sz1,
opencv_core.Size sz2,
opencv_core.Rect roi) |
void |
opencv_stitching.Blender.prepare(opencv_core.Rect dst_roi)
This is an overloaded method, provided for convenience.
|
void |
opencv_stitching.FeatherBlender.prepare(opencv_core.Rect dst_roi) |
void |
opencv_stitching.MultiBandBlender.prepare(opencv_core.Rect dst_roi) |
opencv_core.RectVector |
opencv_core.RectVector.push_back(opencv_core.Rect value) |
opencv_core.RectVector |
opencv_core.RectVector.put(long i,
opencv_core.Rect value) |
opencv_core.RectVector |
opencv_core.RectVector.put(opencv_core.Rect... array) |
opencv_core.RectVector |
opencv_core.RectVector.put(opencv_core.Rect value) |
opencv_core.Rect |
opencv_core.Rect.put(opencv_core.Rect r) |
opencv_text.ERStat |
opencv_text.ERStat.rect(opencv_core.Rect rect) |
static void |
opencv_imgproc.rectangle(opencv_core.Mat img,
opencv_core.Rect rec,
opencv_core.Scalar color) |
static void |
opencv_imgproc.rectangle(opencv_core.Mat img,
opencv_core.Rect rec,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
This is an overloaded method, provided for convenience.
|
static float |
opencv_calib3d.rectify3Collinear(opencv_core.GpuMat cameraMatrix1,
opencv_core.GpuMat distCoeffs1,
opencv_core.GpuMat cameraMatrix2,
opencv_core.GpuMat distCoeffs2,
opencv_core.GpuMat cameraMatrix3,
opencv_core.GpuMat distCoeffs3,
opencv_core.GpuMatVector imgpt1,
opencv_core.GpuMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.GpuMat R12,
opencv_core.GpuMat T12,
opencv_core.GpuMat R13,
opencv_core.GpuMat T13,
opencv_core.GpuMat R1,
opencv_core.GpuMat R2,
opencv_core.GpuMat R3,
opencv_core.GpuMat P1,
opencv_core.GpuMat P2,
opencv_core.GpuMat P3,
opencv_core.GpuMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.GpuMat cameraMatrix1,
opencv_core.GpuMat distCoeffs1,
opencv_core.GpuMat cameraMatrix2,
opencv_core.GpuMat distCoeffs2,
opencv_core.GpuMat cameraMatrix3,
opencv_core.GpuMat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.GpuMat R12,
opencv_core.GpuMat T12,
opencv_core.GpuMat R13,
opencv_core.GpuMat T13,
opencv_core.GpuMat R1,
opencv_core.GpuMat R2,
opencv_core.GpuMat R3,
opencv_core.GpuMat P1,
opencv_core.GpuMat P2,
opencv_core.GpuMat P3,
opencv_core.GpuMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.GpuMat cameraMatrix1,
opencv_core.GpuMat distCoeffs1,
opencv_core.GpuMat cameraMatrix2,
opencv_core.GpuMat distCoeffs2,
opencv_core.GpuMat cameraMatrix3,
opencv_core.GpuMat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.GpuMat R12,
opencv_core.GpuMat T12,
opencv_core.GpuMat R13,
opencv_core.GpuMat T13,
opencv_core.GpuMat R1,
opencv_core.GpuMat R2,
opencv_core.GpuMat R3,
opencv_core.GpuMat P1,
opencv_core.GpuMat P2,
opencv_core.GpuMat P3,
opencv_core.GpuMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.GpuMatVector imgpt1,
opencv_core.GpuMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
Computes the rectification transformations for a 3-head camera, where all the heads are on the same line.
|
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.GpuMatVector imgpt1,
opencv_core.GpuMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.UMat cameraMatrix3,
opencv_core.UMat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.UMat R12,
opencv_core.UMat T12,
opencv_core.UMat R13,
opencv_core.UMat T13,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat R3,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat P3,
opencv_core.UMat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.GpuMat src,
opencv_core.GpuMat sqr,
opencv_core.GpuMat dst,
opencv_core.Rect rect) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.GpuMat src,
opencv_core.GpuMat sqr,
opencv_core.GpuMat dst,
opencv_core.Rect rect,
opencv_core.Stream stream) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.Mat src,
opencv_core.Mat sqr,
opencv_core.Mat dst,
opencv_core.Rect rect) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.Mat src,
opencv_core.Mat sqr,
opencv_core.Mat dst,
opencv_core.Rect rect,
opencv_core.Stream stream)
Computes a standard deviation of integral images.
|
static void |
opencv_cudaarithm.rectStdDev(opencv_core.UMat src,
opencv_core.UMat sqr,
opencv_core.UMat dst,
opencv_core.Rect rect) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.UMat src,
opencv_core.UMat sqr,
opencv_core.UMat dst,
opencv_core.Rect rect,
opencv_core.Stream stream) |
opencv_face.FacemarkAAM.Model.Texture |
opencv_face.FacemarkAAM.Model.Texture.resolution(opencv_core.Rect resolution) |
void |
opencv_tracking.TrackerSampler.sampling(opencv_core.Mat image,
opencv_core.Rect boundingBox)
Computes the regions starting from a position in an image.
|
boolean |
opencv_tracking.TrackerSamplerAlgorithm.sampling(opencv_core.Mat image,
opencv_core.Rect boundingBox,
opencv_core.MatVector sample)
Computes the regions starting from a position in an image.
|
boolean |
opencv_tracking.TrackerSamplerCS.samplingImpl(opencv_core.Mat image,
opencv_core.Rect boundingBox,
opencv_core.MatVector sample) |
void |
opencv_calib3d.StereoBM.setROI1(opencv_core.Rect roi1) |
void |
opencv_calib3d.StereoBM.setROI2(opencv_core.Rect roi2) |
void |
opencv_tracking.TrackerStateEstimatorAdaBoosting.setSampleROI(opencv_core.Rect ROI)
Sets the sampling ROI.
|
static void |
opencv_calib3d.stereoRectify(opencv_core.GpuMat cameraMatrix1,
opencv_core.GpuMat distCoeffs1,
opencv_core.GpuMat cameraMatrix2,
opencv_core.GpuMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.GpuMat R,
opencv_core.GpuMat T,
opencv_core.GpuMat R1,
opencv_core.GpuMat R2,
opencv_core.GpuMat P1,
opencv_core.GpuMat P2,
opencv_core.GpuMat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
opencv_calib3d.stereoRectify(opencv_core.UMat cameraMatrix1,
opencv_core.UMat distCoeffs1,
opencv_core.UMat cameraMatrix2,
opencv_core.UMat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.UMat R,
opencv_core.UMat T,
opencv_core.UMat R1,
opencv_core.UMat R2,
opencv_core.UMat P1,
opencv_core.UMat P2,
opencv_core.UMat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2) |