| Package | Description |
|---|---|
| org.bytedeco.javacpp | |
| org.bytedeco.javacpp.helper | |
| Modifier and Type | Method and Description |
|---|---|
opencv_core.Mat |
opencv_core.MatExpr.a() |
opencv_core.Mat |
opencv_face.FacemarkAAM.Model.Texture.A()
gray values from all face regions in the dataset, projected in PCA space
|
opencv_core.Mat |
opencv_face.FacemarkAAM.Model.Texture.A0()
average of gray values from all face regions in the dataset
|
opencv_core.Mat |
opencv_face.FacemarkAAM.Model.Texture.AA()
gray values from all eroded face regions in the dataset, projected in PCA space
|
opencv_core.Mat |
opencv_face.FacemarkAAM.Model.Texture.AA0()
average of gray values from all eroded face regions in the dataset
|
static opencv_core.Mat |
opencv_core.addPut(opencv_core.Mat a,
opencv_core.Mat b)
\cond IGNORED
|
static opencv_core.Mat |
opencv_core.addPut(opencv_core.Mat a,
opencv_core.Scalar b) |
opencv_core.Mat |
opencv_core.Mat.adjustROI(int dtop,
int dbottom,
int dleft,
int dright)
\brief Adjusts a submatrix size and position within the parent matrix.
|
opencv_core.Mat |
opencv_core.Mat.allocator(opencv_core.MatAllocator allocator) |
static opencv_core.Mat |
opencv_core.andPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.andPut(opencv_core.Mat a,
opencv_core.Scalar b) |
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Range ranges)
\overload
|
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Range rowRange,
opencv_core.Range colRange)
\brief Extracts a rectangular submatrix.
|
opencv_core.Mat |
opencv_core.Mat.apply(opencv_core.Rect roi)
\overload
|
opencv_core.Mat |
opencv_core.NAryMatIterator.arrays(int i)
the iterated arrays
|
opencv_core.Mat |
opencv_core.MatExpr.asMat() |
opencv_core.Mat |
opencv_core.MatExpr.b() |
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.GpuMat vec) |
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.Mat vec)
\brief Reconstructs vectors from their PC projections.
|
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.UMat vec) |
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.GpuMat image) |
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.GpuMat image,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.Mat image) |
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.Mat image,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from image.
|
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.UMat image) |
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.UMat image,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static opencv_core.Mat |
opencv_dnn.blobFromImages(opencv_core.GpuMatVector images) |
static opencv_core.Mat |
opencv_dnn.blobFromImages(opencv_core.GpuMatVector images,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static opencv_core.Mat |
opencv_dnn.blobFromImages(opencv_core.MatVector images) |
static opencv_core.Mat |
opencv_dnn.blobFromImages(opencv_core.MatVector images,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from series of images.
|
static opencv_core.Mat |
opencv_dnn.blobFromImages(opencv_core.UMatVector images) |
static opencv_core.Mat |
opencv_dnn.blobFromImages(opencv_core.UMatVector images,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
opencv_core.Mat |
opencv_aruco.Dictionary.bytesList() |
opencv_core.Mat |
opencv_core.MatExpr.c() |
opencv_core.Mat |
opencv_core.Mat.clone()
\brief Creates a full copy of the array and the underlying data.
|
opencv_core.Mat |
opencv_features2d.BOWTrainer.cluster()
\overload
|
opencv_core.Mat |
opencv_features2d.BOWKMeansTrainer.cluster() |
opencv_core.Mat |
opencv_features2d.BOWTrainer.cluster(opencv_core.Mat descriptors)
\brief Clusters train descriptors.
|
opencv_core.Mat |
opencv_features2d.BOWKMeansTrainer.cluster(opencv_core.Mat descriptors) |
opencv_core.Mat |
opencv_core.Mat.col(int x)
\brief Creates a matrix header for the specified matrix column.
|
opencv_core.Mat |
opencv_core.Mat.colRange(int startcol,
int endcol)
\brief Creates a matrix header for the specified column span.
|
opencv_core.Mat |
opencv_core.Mat.colRange(opencv_core.Range r)
\overload
|
opencv_core.Mat |
opencv_core.Mat.cols(int cols) |
opencv_core.Mat |
opencv_video.KalmanFilter.controlMatrix()
control matrix (B) (not used if there is no control)
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.correct(opencv_core.GpuMat measurement) |
opencv_core.Mat |
opencv_video.KalmanFilter.correct(opencv_core.Mat measurement)
\brief Updates the predicted state from the measurement.
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.correct(opencv_core.Mat measurement)
The function performs correction step of the algorithm
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.correct(opencv_core.UMat measurement) |
opencv_core.Mat |
opencv_core.HostMem.createMatHeader()
returns matrix header with disabled reference counting for HostMem data.
|
static opencv_core.Mat |
opencv_text.createOCRHMMTransitionsTable(org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.StringVector lexicon) |
static opencv_core.Mat |
opencv_text.createOCRHMMTransitionsTable(String vocabulary,
opencv_core.StringVector lexicon) |
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.GpuMat m) |
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.Mat m)
\brief Computes a cross-product of two 3-element vectors.
|
opencv_core.Mat |
opencv_core.MatExpr.cross(opencv_core.Mat m) |
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.UMat m) |
static opencv_core.Mat |
opencv_core.cvarrToMat(opencv_core.CvArr arr) |
static opencv_core.Mat |
opencv_core.cvarrToMat(opencv_core.CvArr arr,
boolean copyData,
boolean allowND,
int coiMode,
org.bytedeco.javacpp.Pointer buf)
\addtogroup core_c_glue
\{
|
static opencv_core.Mat |
opencv_core.cvarrToMatND(opencv_core.CvArr arr) |
static opencv_core.Mat |
opencv_core.cvarrToMatND(opencv_core.CvArr arr,
boolean copyData,
int coiMode) |
opencv_core.Mat |
opencv_core.Mat.data(org.bytedeco.javacpp.BytePointer data) |
opencv_core.Mat |
opencv_core.Mat.diag() |
opencv_core.Mat |
opencv_core.Mat.diag(int d)
\brief Extracts a diagonal from a matrix
|
static opencv_core.Mat |
opencv_core.Mat.diag(opencv_core.Mat d)
\brief creates a diagonal matrix
|
opencv_core.Mat |
opencv_core.Mat.dims(int dims) |
opencv_core.Mat |
opencv_videostab.FastMarchingMethod.distanceMap() |
static opencv_core.Mat |
opencv_core.dividePut(opencv_core.Mat a,
double b) |
static opencv_core.Mat |
opencv_core.dividePut(opencv_core.Mat a,
opencv_core.Mat b) |
opencv_core.Mat |
opencv_core.PCA.eigenvalues()
eigenvalues of the covariation matrix
|
opencv_core.Mat |
opencv_core.LDA.eigenvalues()
Returns the eigenvalues of this LDA.
|
opencv_core.Mat |
opencv_core.PCA.eigenvectors()
eigenvectors of the covariation matrix
|
opencv_core.Mat |
opencv_core.LDA.eigenvectors()
Returns the eigenvectors of this LDA.
|
static opencv_core.Mat |
opencv_videostab.ensureInclusionConstraint(opencv_core.Mat M,
opencv_core.Size size,
float trimRatio) |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilterParams.errorCovInit()
State estimate cross-covariance matrix, DP x DP, default is identity.
|
opencv_core.Mat |
opencv_video.KalmanFilter.errorCovPost()
posteriori error estimate covariance matrix (P(k)): P(k)=(I-K(k)*H)*P'(k)
|
opencv_core.Mat |
opencv_video.KalmanFilter.errorCovPre()
priori error estimate covariance matrix (P'(k)): P'(k)=A*P(k-1)*At + Q)
|
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.GpuMat frame0,
opencv_core.GpuMat frame1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.GpuMat frame0,
opencv_core.GpuMat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.GpuMat frame0,
opencv_core.GpuMat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.Mat points0,
opencv_core.Mat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.Mat points0,
opencv_core.Mat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.Mat points0,
opencv_core.Mat points1) |
opencv_core.Mat |
opencv_videostab.ImageMotionEstimatorBase.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.FromFileMotionReader.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.ToFileMotionWriter.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.ImageMotionEstimatorBase.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.FromFileMotionReader.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.ToFileMotionWriter.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
org.bytedeco.javacpp.BoolPointer ok)
\brief Estimates global motion between two 2D point clouds.
|
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.ImageMotionEstimatorBase.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.FromFileMotionReader.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.ToFileMotionWriter.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.UMat points0,
opencv_core.UMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.UMat points0,
opencv_core.UMat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.UMat points0,
opencv_core.UMat points1) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.UMat frame0,
opencv_core.UMat frame1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.UMat frame0,
opencv_core.UMat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.UMat points0,
opencv_core.UMat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.UMat frame0,
opencv_core.UMat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.GpuMat from,
opencv_core.GpuMat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.GpuMat from,
opencv_core.GpuMat to,
opencv_core.GpuMat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.Mat from,
opencv_core.Mat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters)
\brief Computes an optimal affine transformation between two 2D point sets.
|
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.UMat from,
opencv_core.UMat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.GpuMat from,
opencv_core.GpuMat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.GpuMat from,
opencv_core.GpuMat to,
opencv_core.GpuMat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.Mat from,
opencv_core.Mat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters)
\brief Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.UMat from,
opencv_core.UMat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.UMat from,
opencv_core.UMat to,
opencv_core.UMat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.GpuMat points0,
opencv_core.GpuMat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
int model,
float[] rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
int model,
FloatBuffer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
int model,
org.bytedeco.javacpp.FloatPointer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
float[] rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
FloatBuffer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
org.bytedeco.javacpp.FloatPointer rmse)
\addtogroup videostab_motion
\{
|
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
float[] rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
FloatBuffer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
org.bytedeco.javacpp.FloatPointer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.GpuMat points0,
opencv_core.GpuMat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
int model,
opencv_videostab.RansacParams params,
float[] rmse,
int[] ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
int model,
opencv_videostab.RansacParams params,
FloatBuffer rmse,
IntBuffer ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.GpuMat points0,
opencv_core.GpuMat points1,
int model,
opencv_videostab.RansacParams params,
org.bytedeco.javacpp.FloatPointer rmse,
org.bytedeco.javacpp.IntPointer ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
opencv_videostab.RansacParams params,
float[] rmse,
int[] ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
opencv_videostab.RansacParams params,
FloatBuffer rmse,
IntBuffer ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
opencv_videostab.RansacParams params,
org.bytedeco.javacpp.FloatPointer rmse,
org.bytedeco.javacpp.IntPointer ninliers)
\brief Estimates best global motion between two 2D point clouds robustly (using RANSAC method).
|
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
opencv_videostab.RansacParams params,
float[] rmse,
int[] ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
opencv_videostab.RansacParams params,
FloatBuffer rmse,
IntBuffer ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.UMat points0,
opencv_core.UMat points1,
int model,
opencv_videostab.RansacParams params,
org.bytedeco.javacpp.FloatPointer rmse,
org.bytedeco.javacpp.IntPointer ninliers) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
boolean fullAffine) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
boolean fullAffine,
int ransacMaxIters,
double ransacGoodRatio,
int ransacSize0) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine)
\brief Computes an optimal affine transformation between two 2D point sets.
|
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine,
int ransacMaxIters,
double ransacGoodRatio,
int ransacSize0) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.UMat src,
opencv_core.UMat dst,
boolean fullAffine) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.UMat src,
opencv_core.UMat dst,
boolean fullAffine,
int ransacMaxIters,
double ransacGoodRatio,
int ransacSize0) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2,
double focal,
opencv_core.Point2d pp,
int method,
double prob,
double threshold,
opencv_core.GpuMat mask) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2,
opencv_core.GpuMat cameraMatrix) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2,
opencv_core.GpuMat cameraMatrix,
int method,
double prob,
double threshold,
opencv_core.GpuMat mask) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2,
double focal,
opencv_core.Point2d pp,
int method,
double prob,
double threshold,
opencv_core.Mat mask)
\overload
|
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix,
int method,
double prob,
double threshold,
opencv_core.Mat mask)
\brief Calculates an essential matrix from the corresponding points in two images.
|
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2,
double focal,
opencv_core.Point2d pp,
int method,
double prob,
double threshold,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat cameraMatrix) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat cameraMatrix,
int method,
double prob,
double threshold,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2,
int method,
double ransacReprojThreshold,
double confidence,
opencv_core.GpuMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2,
opencv_core.GpuMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.GpuMat points1,
opencv_core.GpuMat points2,
opencv_core.GpuMat mask,
int method,
double ransacReprojThreshold,
double confidence) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
int method,
double ransacReprojThreshold,
double confidence,
opencv_core.Mat mask)
\brief Calculates a fundamental matrix from the corresponding points in two images.
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask,
int method,
double ransacReprojThreshold,
double confidence)
\overload
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2,
int method,
double ransacReprojThreshold,
double confidence,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.UMat points1,
opencv_core.UMat points2,
opencv_core.UMat mask,
int method,
double ransacReprojThreshold,
double confidence) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.GpuMat srcPoints,
opencv_core.GpuMat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.GpuMat srcPoints,
opencv_core.GpuMat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.GpuMat mask,
int maxIters,
double confidence) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.GpuMat srcPoints,
opencv_core.GpuMat dstPoints,
opencv_core.GpuMat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.GpuMat srcPoints,
opencv_core.GpuMat dstPoints,
opencv_core.GpuMat mask,
int method,
double ransacReprojThreshold) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.Mat mask,
int maxIters,
double confidence)
\brief Finds a perspective transformation between two planes.
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask,
int method,
double ransacReprojThreshold)
\overload
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.UMat mask,
int maxIters,
double confidence) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.UMat srcPoints,
opencv_core.UMat dstPoints,
opencv_core.UMat mask,
int method,
double ransacReprojThreshold) |
opencv_core.Mat |
opencv_core.MatBytePairVector.first(long i) |
opencv_core.Mat |
opencv_core.Mat.flags(int flags) |
opencv_core.Mat |
opencv_dnn.Net.forward() |
opencv_core.Mat |
opencv_dnn.Net.forward(org.bytedeco.javacpp.BytePointer outputName)
\brief Runs forward pass to compute output of layer with name \p outputName.
|
opencv_core.Mat |
opencv_dnn.Net.forward(String outputName) |
opencv_core.Mat |
opencv_video.KalmanFilter.gain()
Kalman gain matrix (K(k)): K(k)=P'(k)*Ht*inv(H*P'(k)*Ht+R)
|
opencv_core.Mat |
opencv_objdetect.BaseCascadeClassifier.MaskGenerator.generateMask(opencv_core.Mat src) |
opencv_core.Mat |
opencv_ml.LogisticRegression.get_learnt_thetas()
\brief This function returns the trained parameters arranged across rows.
|
opencv_core.Mat[] |
opencv_core.MatVector.get() |
opencv_core.Mat |
opencv_core.MatVector.Iterator.get() |
opencv_core.Mat[] |
opencv_dnn.MatPointerVector.get() |
opencv_core.Mat |
opencv_dnn.MatPointerVector.Iterator.get() |
opencv_core.Mat |
opencv_core.MatVector.get(long i) |
opencv_core.Mat |
opencv_dnn.MatPointerVector.get(long i) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.Mat src,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.Point2f src,
opencv_core.Point2f dst)
\brief Calculates an affine transform from three pairs of the corresponding points.
|
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.UMat src,
opencv_core.UMat dst) |
static opencv_core.Mat |
opencv_aruco.Dictionary.getBitsFromByteList(opencv_core.Mat byteList,
int markerSize)
\brief Transform list of bytes to matrix of bits
|
static opencv_core.Mat |
opencv_aruco.Dictionary.getByteListFromBits(opencv_core.Mat bits)
\brief Transform matrix of bits to list of bytes in the 4 rotations
|
opencv_core.Mat |
opencv_ml.TrainData.getCatMap() |
opencv_core.Mat |
opencv_ml.TrainData.getCatOfs() |
opencv_core.Mat |
opencv_ml.TrainData.getClassLabels()
\brief Returns the vector of class labels
|
opencv_core.Mat |
opencv_ml.SVM.getClassWeights() |
opencv_core.Mat |
opencv_tracking.CvFeatureEvaluator.getCls() |
opencv_core.Mat |
opencv_ximgproc.DisparityWLSFilter.getConfidenceMap()
\brief Get the confidence map that was used in the last filter call.
|
opencv_core.Mat |
opencv_tracking.Detector.getConfImageDisplay() |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.GpuMat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.GpuMat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint)
\brief Returns the default new camera matrix.
|
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.UMat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint) |
opencv_core.Mat |
opencv_cudaobjdetect.HOG.getDefaultPeopleDetector()
\brief Returns coefficients of the classifier trained for people detection.
|
opencv_core.Mat |
opencv_ml.TrainData.getDefaultSubstValues() |
opencv_core.Mat |
opencv_face.BasicFaceRecognizer.getEigenValues() |
opencv_core.Mat |
opencv_face.BasicFaceRecognizer.getEigenVectors() |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.getErrorCov() |
opencv_core.Mat |
opencv_tracking.TrackerStateEstimatorMILBoosting.TrackerMILTargetState.getFeatures()
\brief Get the features extracted
|
static opencv_core.Mat |
opencv_imgproc.getGaborKernel(opencv_core.Size ksize,
double sigma,
double theta,
double lambd,
double gamma) |
static opencv_core.Mat |
opencv_imgproc.getGaborKernel(opencv_core.Size ksize,
double sigma,
double theta,
double lambd,
double gamma,
double psi,
int ktype)
\brief Returns Gabor filter coefficients.
|
static opencv_core.Mat |
opencv_imgproc.getGaussianKernel(int ksize,
double sigma) |
static opencv_core.Mat |
opencv_imgproc.getGaussianKernel(int ksize,
double sigma,
int ktype)
\} imgproc_feature
|
opencv_core.Mat |
opencv_img_hash.RadialVarianceHash.getHash() |
opencv_core.Mat |
opencv_face.BasicFaceRecognizer.getLabels() |
opencv_core.Mat |
opencv_face.LBPHFaceRecognizer.getLabels() |
opencv_core.Mat |
opencv_ml.ANN_MLP.getLayerSizes()
Integer vector specifying the number of neurons in each layer including the input and output layers.
|
opencv_core.Mat |
opencv_bioinspired.Retina.getMagnoRAW()
\overload
|
opencv_core.Mat |
opencv_core.UMat.getMat(int flags) |
opencv_core.Mat |
opencv_face.BasicFaceRecognizer.getMean() |
opencv_core.Mat |
opencv_ml.EM.getMeans()
\brief Returns the cluster centers (means of the Gaussian mixture)
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.getMeasurementNoiseCov() |
opencv_core.Mat |
opencv_ml.TrainData.getMissing() |
static opencv_core.Mat |
opencv_videostab.getMotion(int from,
int to,
opencv_core.MatVector motions)
\brief Computes motion between two frames assuming that all the intermediate motions are known.
|
opencv_core.Mat |
opencv_ml.TrainData.getNormCatResponses() |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.GpuMat cameraMatrix,
opencv_core.GpuMat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.GpuMat cameraMatrix,
opencv_core.GpuMat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint)
\brief Returns the new camera matrix based on the free scaling parameter.
|
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.UMat cameraMatrix,
opencv_core.UMat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint) |
opencv_core.Mat |
opencv_dnn.Net.getParam(opencv_dnn.DictValue layer) |
opencv_core.Mat |
opencv_dnn.Net.getParam(opencv_dnn.DictValue layer,
int numParam)
\brief Returns parameter blob of the layer.
|
opencv_core.Mat |
opencv_bioinspired.Retina.getParvoRAW()
\overload
|
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Calculates a perspective transform from four pairs of the corresponding points.
|
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.Point2f src,
opencv_core.Point2f dst)
returns 3x3 perspective transformation for the corresponding 4 point pairs.
|
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.UMat src,
opencv_core.UMat dst) |
opencv_core.Mat |
opencv_img_hash.RadialVarianceHash.getPixPerLine(opencv_core.Mat input) |
static opencv_core.Mat |
opencv_dnn.getPlane(opencv_core.Mat m,
int n,
int cn) |
opencv_core.Mat |
opencv_ml.DTrees.getPriors() |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.getProcessNoiseCov() |
opencv_core.Mat |
opencv_img_hash.RadialVarianceHash.getProjection() |
opencv_core.Mat |
opencv_photo.CalibrateRobertson.getRadiance() |
opencv_core.Mat |
opencv_ml.TrainData.getResponses() |
static opencv_core.Mat |
opencv_imgproc.getRotationMatrix2D(opencv_core.Point2f center,
double angle,
double scale)
\brief Calculates an affine matrix of 2D rotation.
|
opencv_core.Mat |
opencv_ml.TrainData.getSamples() |
opencv_core.Mat |
opencv_ml.TrainData.getSampleWeights() |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.getState() |
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize) |
static opencv_core.Mat |
opencv_imgproc.getStructuringElement(int shape,
opencv_core.Size ksize,
opencv_core.Point anchor)
\brief Returns a structuring element of the specified size and shape for morphological operations.
|
static opencv_core.Mat |
opencv_ml.TrainData.getSubMatrix(opencv_core.Mat matrix,
opencv_core.Mat idx,
int layout)
\brief Extract from matrix rows/cols specified by passed indexes.
|
static opencv_core.Mat |
opencv_ml.TrainData.getSubVector(opencv_core.Mat vec,
opencv_core.Mat idx)
\brief Extract from 1D vector elements specified by passed indexes.
|
opencv_core.Mat |
opencv_ml.SVM.getSupportVectors()
\brief Retrieves all the support vectors
|
opencv_core.Mat |
opencv_tracking.TrackerStateEstimatorAdaBoosting.TrackerAdaBoostingTargetState.getTargetResponses()
\brief Get the features extracted
|
opencv_core.Mat |
opencv_ml.TrainData.getTestNormCatResponses() |
opencv_core.Mat |
opencv_ml.TrainData.getTestResponses() |
opencv_core.Mat |
opencv_ml.TrainData.getTestSampleIdx() |
opencv_core.Mat |
opencv_ml.TrainData.getTestSamples()
\brief Returns matrix of test samples
|
opencv_core.Mat |
opencv_ml.TrainData.getTestSampleWeights() |
opencv_core.Mat |
opencv_ml.TrainData.getTrainNormCatResponses()
\brief Returns the vector of normalized categorical responses
|
opencv_core.Mat |
opencv_ml.TrainData.getTrainResponses()
\brief Returns the vector of responses
|
opencv_core.Mat |
opencv_ml.TrainData.getTrainSampleIdx() |
opencv_core.Mat |
opencv_ml.TrainData.getTrainSamples() |
opencv_core.Mat |
opencv_ml.TrainData.getTrainSamples(int layout,
boolean compressSamples,
boolean compressVars)
\brief Returns matrix of train samples
|
opencv_core.Mat |
opencv_ml.TrainData.getTrainSampleWeights() |
opencv_core.Mat |
opencv_ml.SVM.getUncompressedSupportVectors()
\brief Retrieves all the uncompressed support vectors of a linear %SVM
|
opencv_core.Mat |
opencv_ml.TrainData.getVarIdx() |
opencv_core.Mat |
opencv_ml.RTrees.getVarImportance()
Returns the variable importance array.
|
opencv_core.Mat |
opencv_ml.TrainData.getVarSymbolFlags() |
opencv_core.Mat |
opencv_ml.TrainData.getVarType() |
opencv_core.Mat |
opencv_features2d.BOWImgDescriptorExtractor.getVocabulary()
\brief Returns the set vocabulary.
|
opencv_core.Mat |
opencv_ml.EM.getWeights()
\brief Returns weights of the mixtures
|
opencv_core.Mat |
opencv_ml.SVMSGD.getWeights() |
opencv_core.Mat |
opencv_ml.ANN_MLP.getWeights(int layerIdx) |
opencv_core.Mat |
opencv_stitching.MatchesInfo.H()
Estimated transformation
|
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.GpuMat buf,
int flags) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.GpuMat buf,
int flags,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.Mat buf,
int flags)
\brief Reads an image from a buffer in memory.
|
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.Mat buf,
int flags,
opencv_core.Mat dst)
\overload
|
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.UMat buf,
int flags) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.UMat buf,
int flags,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_imgcodecs.imread(org.bytedeco.javacpp.BytePointer filename) |
static opencv_core.Mat |
opencv_imgcodecs.imread(org.bytedeco.javacpp.BytePointer filename,
int flags)
\brief Loads an image from a file.
|
static opencv_core.Mat |
opencv_imgcodecs.imread(String filename) |
static opencv_core.Mat |
opencv_imgcodecs.imread(String filename,
int flags) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size imageSize) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size imageSize,
double aspectRatio) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
double aspectRatio)
\brief Finds an initial camera matrix from 3D-2D point correspondences.
|
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize) |
static opencv_core.Mat |
opencv_calib3d.initCameraMatrix2D(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
double aspectRatio) |
opencv_core.Mat |
opencv_stitching.CameraParams.K() |
opencv_core.Mat |
opencv_core.MatConstIterator.m() |
opencv_core.Mat |
opencv_core.FileNode.mat()
Simplified reading API to use with bindings.
|
opencv_core.Mat |
opencv_core.PCA.mean()
mean value subtracted before the projection and added after the back projection
|
opencv_core.Mat |
opencv_video.KalmanFilter.measurementMatrix()
measurement matrix (H)
|
opencv_core.Mat |
opencv_video.KalmanFilter.measurementNoiseCov()
measurement noise covariance matrix (R)
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilterParams.measurementNoiseCov()
Measurement noise cross-covariance matrix, MP x MP.
|
static opencv_core.Mat |
opencv_core.multiplyPut(opencv_core.Mat a,
double b) |
static opencv_core.Mat |
opencv_core.multiplyPut(opencv_core.Mat a,
opencv_core.Mat b) |
opencv_core.Mat |
opencv_videostab.IFrameSource.nextFrame() |
opencv_core.Mat |
opencv_videostab.NullFrameSource.nextFrame() |
opencv_core.Mat |
opencv_videostab.VideoFileSource.nextFrame() |
opencv_core.Mat |
opencv_videostab.OnePassStabilizer.nextFrame() |
opencv_core.Mat |
opencv_videostab.TwoPassStabilizer.nextFrame() |
static opencv_core.Mat |
opencv_core.noArray()
\brief This type is very similar to InputArray except that it is used for input/output and output function
parameters.
|
static opencv_core.Mat |
opencv_core.orPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.orPut(opencv_core.Mat a,
opencv_core.Scalar b) |
opencv_core.Mat |
opencv_core.NAryMatIterator.planes()
the current planes
|
opencv_core.Mat |
opencv_core.MatVector.pop_back() |
opencv_core.Mat |
opencv_dnn.MatPointerVector.pop_back() |
opencv_core.Mat |
opencv_core.Mat.position(long position) |
opencv_core.Mat |
opencv_video.KalmanFilter.predict() |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.predict() |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.predict(opencv_core.GpuMat control) |
opencv_core.Mat |
opencv_video.KalmanFilter.predict(opencv_core.Mat control)
\brief Computes a predicted state.
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.predict(opencv_core.Mat control)
The function performs prediction step of the algorithm
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.predict(opencv_core.UMat control) |
opencv_core.Mat |
opencv_video.KalmanFilter.processNoiseCov()
process noise covariance matrix (Q)
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilterParams.processNoiseCov()
Process noise cross-covariance matrix, DP x DP.
|
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.GpuMat vec) |
opencv_core.Mat |
opencv_core.LDA.project(opencv_core.GpuMat src) |
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.Mat vec)
\brief Projects vector(s) to the principal component subspace.
|
opencv_core.Mat |
opencv_core.LDA.project(opencv_core.Mat src)
Projects samples into the LDA subspace.
|
opencv_core.Mat |
opencv_face.FacemarkLBF.BBox.project(opencv_core.Mat shape) |
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.UMat vec) |
opencv_core.Mat |
opencv_core.LDA.project(opencv_core.UMat src) |
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.Mat m)
\brief assignment operators
|
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.MatExpr expr)
\overload
|
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.Scalar s)
\brief Sets all or some of the array elements to the specified value.
|
opencv_core.Mat |
opencv_face.FacemarkAAM.Model.Q() |
opencv_core.Mat |
opencv_face.FacemarkAAM.Config.R() |
opencv_core.Mat |
opencv_stitching.CameraParams.R() |
static opencv_core.Mat |
opencv_optflow.readOpticalFlow(org.bytedeco.javacpp.BytePointer path)
\brief Read a .flo file
|
static opencv_core.Mat |
opencv_optflow.readOpticalFlow(String path) |
static opencv_core.Mat |
opencv_dnn.readTorchBlob(org.bytedeco.javacpp.BytePointer filename) |
static opencv_core.Mat |
opencv_dnn.readTorchBlob(org.bytedeco.javacpp.BytePointer filename,
boolean isBinary)
\brief Loads blob which was serialized as torch.Tensor object of Torch7 framework.
|
static opencv_core.Mat |
opencv_dnn.readTorchBlob(String filename) |
static opencv_core.Mat |
opencv_dnn.readTorchBlob(String filename,
boolean isBinary) |
opencv_core.Mat |
opencv_core.LDA.reconstruct(opencv_core.GpuMat src) |
opencv_core.Mat |
opencv_core.LDA.reconstruct(opencv_core.Mat src)
Reconstructs projections from the LDA subspace.
|
opencv_core.Mat |
opencv_core.LDA.reconstruct(opencv_core.UMat src) |
opencv_core.Mat |
opencv_stitching.BundleAdjusterBase.refinementMask() |
static opencv_core.Mat |
opencv_core.repeat(opencv_core.Mat src,
int ny,
int nx)
\overload
|
opencv_core.Mat |
opencv_face.FacemarkLBF.BBox.reproject(opencv_core.Mat shape) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int rows)
\brief Changes the shape and/or the number of channels of a 2D matrix without copying the data.
|
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int[] newshape) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
IntBuffer newshape) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int newndims,
int[] newsz) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int newndims,
IntBuffer newsz) |
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
int newndims,
org.bytedeco.javacpp.IntPointer newsz)
\overload
|
opencv_core.Mat |
opencv_core.Mat.reshape(int cn,
org.bytedeco.javacpp.IntPointer newshape)
\overload
|
opencv_core.Mat |
opencv_core.Mat.row(int y)
\brief Creates a matrix header for the specified matrix row.
|
opencv_core.Mat |
opencv_core.Mat.rowRange(int startrow,
int endrow)
\brief Creates a matrix header for the specified row span.
|
opencv_core.Mat |
opencv_core.Mat.rowRange(opencv_core.Range r)
\overload
|
opencv_core.Mat |
opencv_core.Mat.rows(int rows) |
opencv_core.Mat |
opencv_face.FacemarkAAM.Model.S()
the encoded shapes from training data
|
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.GpuMat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.GpuMat value,
opencv_core.GpuMat mask) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value,
opencv_core.Mat mask)
\brief Sets all or some of the array elements to the specified value.
|
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.UMat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.UMat value,
opencv_core.UMat mask) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0,
opencv_dnn._Range r1) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0,
opencv_dnn._Range r1,
opencv_dnn._Range r2) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0,
opencv_dnn._Range r1,
opencv_dnn._Range r2,
opencv_dnn._Range r3) |
opencv_core.Mat |
opencv_videostab.MotionFilterBase.stabilize(int idx,
opencv_core.MatVector motions,
opencv_core.IntIntPair range) |
opencv_core.Mat |
opencv_videostab.GaussianMotionFilter.stabilize(int idx,
opencv_core.MatVector motions,
opencv_core.IntIntPair range) |
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilterParams.stateInit()
Initial state, DP x 1, default is zero.
|
opencv_core.Mat |
opencv_video.KalmanFilter.statePost()
corrected state (x(k)): x(k)=x'(k)+K(k)*(z(k)-H*x'(k))
|
opencv_core.Mat |
opencv_video.KalmanFilter.statePre()
predicted state (x'(k)): x'(k)=A*x(k-1)+B*u(k)
|
static opencv_core.Mat |
opencv_core.LDA.subspaceProject(opencv_core.GpuMat W,
opencv_core.GpuMat mean,
opencv_core.GpuMat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceProject(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceProject(opencv_core.UMat W,
opencv_core.UMat mean,
opencv_core.UMat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceReconstruct(opencv_core.GpuMat W,
opencv_core.GpuMat mean,
opencv_core.GpuMat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceReconstruct(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceReconstruct(opencv_core.UMat W,
opencv_core.UMat mean,
opencv_core.UMat src) |
static opencv_core.Mat |
opencv_core.subtractPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.subtractPut(opencv_core.Mat a,
opencv_core.Scalar b) |
opencv_core.Mat |
opencv_stitching.CameraParams.t() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp1() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp2() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp3() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp4() |
opencv_core.Mat |
opencv_video.KalmanFilter.temp5() |
opencv_core.Mat |
opencv_video.KalmanFilter.transitionMatrix()
state transition matrix (A)
|
opencv_core.Mat |
opencv_core.SVD.u()
\todo document
|
opencv_core.Mat |
opencv_core.Mat.u(opencv_core.UMatData u) |
opencv_core.Mat |
opencv_core.SVD.vt() |
opencv_core.Mat |
opencv_core.SVD.w() |
static opencv_core.Mat |
opencv_core.xorPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.xorPut(opencv_core.Mat a,
opencv_core.Scalar b) |
| Modifier and Type | Method and Description |
|---|---|
void |
opencv_core.RNG._fill(opencv_core.Mat mat,
int distType,
opencv_core.Mat a,
opencv_core.Mat b) |
void |
opencv_core.RNG._fill(opencv_core.Mat mat,
int distType,
opencv_core.Mat a,
opencv_core.Mat b,
boolean saturateRange)
\brief Fills arrays with random numbers.
|
opencv_core.MatExpr |
opencv_core.MatExpr.a(opencv_core.Mat a) |
opencv_face.FacemarkAAM.Model.Texture |
opencv_face.FacemarkAAM.Model.Texture.A(opencv_core.Mat A) |
opencv_face.FacemarkAAM.Model.Texture |
opencv_face.FacemarkAAM.Model.Texture.A0(opencv_core.Mat A0) |
opencv_face.FacemarkAAM.Model.Texture |
opencv_face.FacemarkAAM.Model.Texture.AA(opencv_core.Mat AA) |
opencv_face.FacemarkAAM.Model.Texture |
opencv_face.FacemarkAAM.Model.Texture.AA0(opencv_core.Mat AA0) |
static opencv_core.MatExpr |
opencv_core.abs(opencv_core.Mat m)
\brief Calculates an absolute value of each matrix element.
|
static void |
opencv_cudaarithm.abs(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.abs(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes an absolute value of each matrix element.
|
static void |
opencv_core.absdiff(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
\brief Calculates the per-element absolute difference between two arrays or between an array and a scalar.
|
static void |
opencv_cudaarithm.absdiff(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.absdiff(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes per-element absolute difference of two matrices (or of a matrix and scalar).
|
static opencv_core.Scalar |
opencv_cudaarithm.absSum(opencv_core.Mat src) |
static opencv_core.Scalar |
opencv_cudaarithm.absSum(opencv_core.Mat src,
opencv_core.Mat mask)
\brief Returns the sum of absolute values for matrix elements.
|
static void |
opencv_imgproc.accumulate(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.accumulate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask)
\} imgproc_misc
|
static void |
opencv_imgproc.accumulateProduct(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_imgproc.accumulateProduct(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
\brief Adds the per-element product of two input images to the accumulator image.
|
static void |
opencv_imgproc.accumulateSquare(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.accumulateSquare(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask)
\brief Adds the square of a source image to the accumulator image.
|
static void |
opencv_imgproc.accumulateWeighted(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha) |
static void |
opencv_imgproc.accumulateWeighted(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
opencv_core.Mat mask)
\brief Updates a running average.
|
static void |
opencv_imgproc.adaptiveThreshold(opencv_core.Mat src,
opencv_core.Mat dst,
double maxValue,
int adaptiveMethod,
int thresholdType,
int blockSize,
double C)
\brief Applies an adaptive threshold to an array.
|
void |
opencv_features2d.BOWTrainer.add(opencv_core.Mat descriptors)
\brief Adds descriptors to a training set.
|
static opencv_core.MatExpr |
opencv_core.add(opencv_core.MatExpr e,
opencv_core.Mat m) |
static opencv_core.MatExpr |
opencv_core.add(opencv_core.Mat a,
opencv_core.Mat b)
\} core_basic
|
static opencv_core.MatExpr |
opencv_core.add(opencv_core.Mat m,
opencv_core.MatExpr e) |
static void |
opencv_core.add(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.add(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.add(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
int dtype)
\brief Calculates the per-element sum of two arrays or an array and a scalar.
|
static void |
opencv_cudaarithm.add(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
int dtype,
opencv_core.Stream stream)
\addtogroup cudaarithm
\{
|
static opencv_core.MatExpr |
opencv_core.add(opencv_core.Mat a,
opencv_core.Scalar s) |
static opencv_core.MatExpr |
opencv_core.add(opencv_core.Scalar s,
opencv_core.Mat a) |
boolean |
opencv_tracking.MultiTracker.add(opencv_tracking.Tracker newTracker,
opencv_core.Mat image,
opencv_core.Rect2d boundingBox)
\brief Add a new object to be tracked.
|
boolean |
opencv_tracking.MultiTracker.add(opencv_tracking.TrackerVector newTrackers,
opencv_core.Mat image,
opencv_core.Rect2dVector boundingBox)
\brief Add a set of objects to be tracked.
|
void |
opencv_ximgproc.SelectiveSearchSegmentation.addImage(opencv_core.Mat img)
\brief Add a new image in the list of images to process.
|
static opencv_core.Mat |
opencv_core.addPut(opencv_core.Mat a,
opencv_core.Mat b)
\cond IGNORED
|
static opencv_core.Mat |
opencv_core.addPut(opencv_core.Mat a,
opencv_core.Scalar b) |
boolean |
opencv_tracking.MultiTracker_Alt.addTarget(opencv_core.Mat image,
opencv_core.Rect2d boundingBox,
opencv_tracking.Tracker tracker_algorithm)
\brief Add a new target to a tracking-list and initialize the tracker with a known bounding box that surrounded the target
|
static void |
opencv_highgui.addText(opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer text,
opencv_core.Point org,
org.bytedeco.javacpp.BytePointer nameFont) |
static void |
opencv_highgui.addText(opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer text,
opencv_core.Point org,
org.bytedeco.javacpp.BytePointer nameFont,
int pointSize,
opencv_core.Scalar color,
int weight,
int style,
int spacing)
\brief Draws a text on the image.
|
static void |
opencv_highgui.addText(opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer text,
opencv_core.Point org,
opencv_highgui.QtFont font)
\brief Draws a text on the image.
|
static void |
opencv_highgui.addText(opencv_core.Mat img,
String text,
opencv_core.Point org,
opencv_highgui.QtFont font) |
static void |
opencv_highgui.addText(opencv_core.Mat img,
String text,
opencv_core.Point org,
String nameFont) |
static void |
opencv_highgui.addText(opencv_core.Mat img,
String text,
opencv_core.Point org,
String nameFont,
int pointSize,
opencv_core.Scalar color,
int weight,
int style,
int spacing) |
boolean |
opencv_face.FacemarkTrain.addTrainingSample(opencv_core.Mat image,
opencv_core.Point2fVector landmarks)
\brief Add one training sample to the trainer.
|
static void |
opencv_core.addWeighted(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
double beta,
double gamma,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.addWeighted(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
double beta,
double gamma,
opencv_core.Mat dst) |
static void |
opencv_core.addWeighted(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
double beta,
double gamma,
opencv_core.Mat dst,
int dtype)
\brief Calculates the weighted sum of two arrays.
|
static void |
opencv_cudaarithm.addWeighted(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
double beta,
double gamma,
opencv_core.Mat dst,
int dtype,
opencv_core.Stream stream)
\brief Computes the weighted sum of two arrays.
|
static void |
opencv_features2d.AGAST(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold) |
static void |
opencv_features2d.AGAST(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression)
\overload
|
static void |
opencv_features2d.AGAST(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression,
int type)
\brief Detects corners using the AGAST algorithm
|
static void |
opencv_cudaimgproc.alphaComp(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat dst,
int alpha_op) |
static void |
opencv_cudaimgproc.alphaComp(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat dst,
int alpha_op,
opencv_core.Stream stream)
\brief Composites two images using alpha opacity values contained in each image.
|
static void |
opencv_ximgproc.amFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigma_s,
double sigma_r) |
static void |
opencv_ximgproc.amFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigma_s,
double sigma_r,
boolean adjust_outliers)
\brief Simple one-line Adaptive Manifold Filter call.
|
static opencv_core.MatExpr |
opencv_core.and(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.and(opencv_core.Mat a,
opencv_core.Scalar s) |
static opencv_core.MatExpr |
opencv_core.and(opencv_core.Scalar s,
opencv_core.Mat a) |
static opencv_core.Mat |
opencv_core.andPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.andPut(opencv_core.Mat a,
opencv_core.Scalar b) |
static void |
opencv_ximgproc.anisotropicDiffusion(opencv_core.Mat src,
opencv_core.Mat dst,
float alpha,
float K,
int niters)
\brief Performs anisotropic diffusion on an image.
|
void |
opencv_stitching.ExposureCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask)
\brief Compensate exposure in the specified image.
|
void |
opencv_stitching.NoExposureCompensator.apply(int arg0,
opencv_core.Point arg1,
opencv_core.Mat arg2,
opencv_core.Mat arg3) |
void |
opencv_stitching.GainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
void |
opencv_stitching.BlocksGainCompensator.apply(int index,
opencv_core.Point corner,
opencv_core.Mat image,
opencv_core.Mat mask) |
opencv_core.SVD |
opencv_core.SVD.apply(opencv_core.Mat src) |
opencv_core.SVD |
opencv_core.SVD.apply(opencv_core.Mat src,
int flags)
\brief the operator that performs SVD.
|
void |
opencv_cudafilters.Filter.apply(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_bgsegm.BackgroundSubtractorCNT.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_bgsegm.BackgroundSubtractorGSOC.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_bgsegm.BackgroundSubtractorLSBP.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_video.BackgroundSubtractor.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_video.BackgroundSubtractorMOG2.apply(opencv_core.Mat image,
opencv_core.Mat fgmask) |
void |
opencv_imgproc.CLAHE.apply(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
|
void |
opencv_bgsegm.BackgroundSubtractorCNT.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate) |
void |
opencv_bgsegm.BackgroundSubtractorGSOC.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate) |
void |
opencv_bgsegm.BackgroundSubtractorLSBP.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate) |
void |
opencv_video.BackgroundSubtractor.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate)
\brief Computes a foreground mask.
|
void |
opencv_video.BackgroundSubtractorMOG2.apply(opencv_core.Mat image,
opencv_core.Mat fgmask,
double learningRate)
\brief Computes a foreground mask.
|
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.Mat data,
opencv_core.Mat mean,
int flags) |
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.Mat data,
opencv_core.Mat mean,
int flags,
double retainedVariance)
\overload
|
opencv_core.PCA |
opencv_core.PCA.apply(opencv_core.Mat data,
opencv_core.Mat mean,
int flags,
int maxComponents)
\brief performs %PCA
|
void |
opencv_cudafilters.Filter.apply(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Applies the specified filter to the image.
|
void |
opencv_cudaimgproc.CudaCLAHE.apply(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Equalizes the histogram of a grayscale image using Contrast Limited Adaptive Histogram Equalization.
|
void |
opencv_stitching.FeaturesFinder.apply(opencv_core.Mat image,
opencv_stitching.ImageFeatures features)
\overload
|
void |
opencv_stitching.FeaturesFinder.apply(opencv_core.Mat image,
opencv_stitching.ImageFeatures features,
opencv_core.RectVector rois)
\brief Finds features in the given image.
|
static void |
opencv_xphoto.applyChannelGains(opencv_core.Mat src,
opencv_core.Mat dst,
float gainB,
float gainG,
float gainR)
\brief Implements an efficient fixed-point approximation for applying channel gains, which is
the last step of multiple white balance algorithms.
|
static void |
opencv_imgproc.applyColorMap(opencv_core.Mat src,
opencv_core.Mat dst,
int colormap)
\brief Applies a GNU Octave/MATLAB equivalent colormap on a given image.
|
static void |
opencv_imgproc.applyColorMap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat userColor)
\brief Applies a user colormap on a given image.
|
void |
opencv_bioinspired.Retina.applyFastToneMapping(opencv_core.Mat inputImage,
opencv_core.Mat outputToneMappedImage)
\brief Method which processes an image with the aim of correcting its luminance: corrects
backlight problems and enhances details in shadows.
|
void |
opencv_bioinspired.RetinaFastToneMapping.applyFastToneMapping(opencv_core.Mat inputImage,
opencv_core.Mat outputToneMappedImage)
\brief applies a luminance correction (initially High Dynamic Range (HDR) tone mapping)
|
float |
opencv_shape.ShapeTransformer.applyTransformation(opencv_core.Mat input) |
float |
opencv_shape.ShapeTransformer.applyTransformation(opencv_core.Mat input,
opencv_core.Mat output)
\brief Apply a transformation, given a pre-estimated transformation parameters.
|
static void |
opencv_imgproc.approxPolyDP(opencv_core.Mat curve,
opencv_core.Mat approxCurve,
double epsilon,
boolean closed)
\brief Approximates a polygonal curve(s) with the specified precision.
|
static double |
opencv_imgproc.arcLength(opencv_core.Mat curve,
boolean closed)
\brief Calculates a contour perimeter or a curve length.
|
static void |
opencv_imgproc.arrowedLine(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_imgproc.arrowedLine(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int line_type,
int shift,
double tipLength)
\brief Draws an arrow segment pointing from the first point to the second one.
|
void |
opencv_core.MatOp.assign(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.assign(opencv_core.MatExpr expr,
opencv_core.Mat m,
int type) |
void |
opencv_core.Mat.assignTo(opencv_core.Mat m) |
void |
opencv_core.Mat.assignTo(opencv_core.Mat m,
int type)
\brief Provides a functional form of convertTo.
|
void |
opencv_core.MatOp.augAssignAdd(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.augAssignAnd(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.augAssignDivide(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.augAssignMultiply(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.augAssignOr(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.augAssignSubtract(opencv_core.MatExpr expr,
opencv_core.Mat m) |
void |
opencv_core.MatOp.augAssignXor(opencv_core.MatExpr expr,
opencv_core.Mat m) |
static void |
opencv_img_hash.averageHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr)
\brief Calculates img_hash::AverageHash in one call
|
opencv_core.MatExpr |
opencv_core.MatExpr.b(opencv_core.Mat b) |
opencv_core.Mat |
opencv_core.PCA.backProject(opencv_core.Mat vec)
\brief Reconstructs vectors from their PC projections.
|
void |
opencv_core.PCA.backProject(opencv_core.Mat vec,
opencv_core.Mat result)
\overload
|
void |
opencv_core.SVD.backSubst(opencv_core.Mat rhs,
opencv_core.Mat dst)
\brief performs a singular value back substitution.
|
static void |
opencv_core.SVD.backSubst(opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
opencv_core.Mat rhs,
opencv_core.Mat dst)
\brief performs back substitution
|
void |
opencv_xphoto.WhiteBalancer.balanceWhite(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Applies white balancing to the input image
|
static void |
opencv_core.batchDistance(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dist,
int dtype,
opencv_core.Mat nidx) |
static void |
opencv_core.batchDistance(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dist,
int dtype,
opencv_core.Mat nidx,
int normType,
int K,
opencv_core.Mat mask,
int update,
boolean crosscheck)
\brief naive nearest neighbor finder
|
static void |
opencv_imgproc.bilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
opencv_imgproc.bilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
\brief Applies the bilateral filter to an image.
|
static void |
opencv_cudaimgproc.bilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int kernel_size,
float sigma_color,
float sigma_spatial) |
static void |
opencv_cudaimgproc.bilateralFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int kernel_size,
float sigma_color,
float sigma_spatial,
int borderMode,
opencv_core.Stream stream)
\brief Performs bilateral filtering of passed image
|
static void |
opencv_ximgproc.bilateralTextureFilter(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_ximgproc.bilateralTextureFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg)
\brief Applies the bilateral texture filter to an image.
|
static void |
opencv_core.bitwise_and(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.bitwise_and(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_and(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
\brief computes bitwise conjunction of the two arrays (dst = src1 & src2)
Calculates the per-element bit-wise conjunction of two arrays or an
array and a scalar.
|
static void |
opencv_cudaarithm.bitwise_and(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\brief Performs a per-element bitwise conjunction of two matrices (or of matrix and scalar).
|
static void |
opencv_core.bitwise_not(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.bitwise_not(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_not(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask)
\brief Inverts every bit of an array.
|
static void |
opencv_cudaarithm.bitwise_not(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\brief Performs a per-element bitwise inversion.
|
static void |
opencv_core.bitwise_or(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.bitwise_or(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_or(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
\brief Calculates the per-element bit-wise disjunction of two arrays or an
array and a scalar.
|
static void |
opencv_cudaarithm.bitwise_or(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\brief Performs a per-element bitwise disjunction of two matrices (or of matrix and scalar).
|
static void |
opencv_core.bitwise_xor(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.bitwise_xor(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.bitwise_xor(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask)
\brief Calculates the per-element bit-wise "exclusive or" operation on two
arrays or an array and a scalar.
|
static void |
opencv_cudaarithm.bitwise_xor(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\brief Performs a per-element bitwise exclusive or operation of two matrices (or of matrix and scalar).
|
void |
opencv_stitching.Blender.blend(opencv_core.Mat dst,
opencv_core.Mat dst_mask)
\brief Blends and returns the final pano.
|
void |
opencv_stitching.FeatherBlender.blend(opencv_core.Mat dst,
opencv_core.Mat dst_mask) |
void |
opencv_stitching.MultiBandBlender.blend(opencv_core.Mat dst,
opencv_core.Mat dst_mask) |
static void |
opencv_cudaimgproc.blendLinear(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat weights1,
opencv_core.Mat weights2,
opencv_core.Mat result) |
static void |
opencv_imgproc.blendLinear(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat weights1,
opencv_core.Mat weights2,
opencv_core.Mat dst)
Performs linear blending of two images:
\f[ \texttt{dst}(i,j) = \texttt{weights1}(i,j)*\texttt{src1}(i,j) + \texttt{weights2}(i,j)*\texttt{src2}(i,j) \f]
|
static void |
opencv_cudaimgproc.blendLinear(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat weights1,
opencv_core.Mat weights2,
opencv_core.Mat result,
opencv_core.Stream stream)
\brief Performs linear blending of two images.
|
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.Mat image) |
static opencv_core.Mat |
opencv_dnn.blobFromImage(opencv_core.Mat image,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from image.
|
static void |
opencv_dnn.blobFromImage(opencv_core.Mat image,
opencv_core.Mat blob) |
static void |
opencv_dnn.blobFromImage(opencv_core.Mat image,
opencv_core.Mat blob,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from image.
|
static void |
opencv_dnn.blobFromImages(opencv_core.GpuMatVector images,
opencv_core.Mat blob) |
static void |
opencv_dnn.blobFromImages(opencv_core.GpuMatVector images,
opencv_core.Mat blob,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static void |
opencv_dnn.blobFromImages(opencv_core.MatVector images,
opencv_core.Mat blob) |
static void |
opencv_dnn.blobFromImages(opencv_core.MatVector images,
opencv_core.Mat blob,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from series of images.
|
static void |
opencv_dnn.blobFromImages(opencv_core.UMatVector images,
opencv_core.Mat blob) |
static void |
opencv_dnn.blobFromImages(opencv_core.UMatVector images,
opencv_core.Mat blob,
double scalefactor,
opencv_core.Size size,
opencv_core.Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static void |
opencv_img_hash.blockMeanHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr) |
static void |
opencv_img_hash.blockMeanHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr,
int mode)
\brief Computes block mean hash of the input image
|
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize) |
static void |
opencv_imgproc.blur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
opencv_core.Point anchor,
int borderType)
\brief Blurs an image using the normalized box filter.
|
static void |
opencv_xphoto.bm3dDenoising(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_xphoto.bm3dDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
int templateWindowSize,
int searchWindowSize,
int blockMatchingStep1,
int blockMatchingStep2,
int groupSize,
int slidingStep,
float beta,
int normType,
int step,
int transformType)
\brief Performs image denoising using the Block-Matching and 3D-filtering algorithm
|
static void |
opencv_xphoto.bm3dDenoising(opencv_core.Mat src,
opencv_core.Mat dstStep1,
opencv_core.Mat dstStep2) |
static void |
opencv_xphoto.bm3dDenoising(opencv_core.Mat src,
opencv_core.Mat dstStep1,
opencv_core.Mat dstStep2,
float h,
int templateWindowSize,
int searchWindowSize,
int blockMatchingStep1,
int blockMatchingStep2,
int groupSize,
int slidingStep,
float beta,
int normType,
int step,
int transformType)
\brief Performs image denoising using the Block-Matching and 3D-filtering algorithm
|
static opencv_core.Rect |
opencv_imgproc.boundingRect(opencv_core.Mat points)
\brief Calculates the up-right bounding rectangle of a point set.
|
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.boxFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
\brief Blurs an image using the box filter.
|
static void |
opencv_imgproc.boxPoints(opencv_core.RotatedRect box,
opencv_core.Mat points)
\brief Finds the four vertices of a rotated rect.
|
void |
opencv_flann.Index.build(opencv_core.Mat features,
opencv_flann.IndexParams params) |
void |
opencv_flann.Index.build(opencv_core.Mat features,
opencv_flann.IndexParams params,
int distType) |
void |
opencv_shape.HistogramCostExtractor.buildCostMatrix(opencv_core.Mat descriptors1,
opencv_core.Mat descriptors2,
opencv_core.Mat costMatrix) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.GpuMat xmap,
opencv_core.GpuMat ymap) |
opencv_core.Rect |
opencv_stitching.RotationWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap)
\brief Builds the projection maps according to the given camera data.
|
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.AffineWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailSphericalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailCylindricalWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarperGpu.buildMaps(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.GpuMatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.GpuMatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.MatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage)
\brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
|
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(opencv_core.Mat img,
opencv_core.UMatVector pyramid,
opencv_core.Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static void |
opencv_core.buildOptionsAddMatrixDescription(org.bytedeco.javacpp.BytePointer buildOptions,
org.bytedeco.javacpp.BytePointer name,
opencv_core.Mat _m) |
static void |
opencv_core.buildOptionsAddMatrixDescription(String buildOptions,
String name,
opencv_core.Mat _m) |
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.GpuMatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.GpuMatVector dst,
int maxlevel,
int borderType) |
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.MatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.MatVector dst,
int maxlevel,
int borderType)
\brief Constructs the Gaussian pyramid for an image.
|
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.UMatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(opencv_core.Mat src,
opencv_core.UMatVector dst,
int maxlevel,
int borderType) |
static void |
opencv_cudawarping.buildWarpAffineMaps(opencv_core.Mat M,
boolean inverse,
opencv_core.Size dsize,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
static void |
opencv_cudawarping.buildWarpAffineMaps(opencv_core.Mat M,
boolean inverse,
opencv_core.Size dsize,
opencv_core.Mat xmap,
opencv_core.Mat ymap,
opencv_core.Stream stream)
\brief Builds transformation maps for affine transformation.
|
static void |
opencv_cudawarping.buildWarpPerspectiveMaps(opencv_core.Mat M,
boolean inverse,
opencv_core.Size dsize,
opencv_core.Mat xmap,
opencv_core.Mat ymap) |
static void |
opencv_cudawarping.buildWarpPerspectiveMaps(opencv_core.Mat M,
boolean inverse,
opencv_core.Size dsize,
opencv_core.Mat xmap,
opencv_core.Mat ymap,
opencv_core.Stream stream)
\brief Builds transformation maps for perspective transformation.
|
opencv_aruco.Dictionary |
opencv_aruco.Dictionary.bytesList(opencv_core.Mat bytesList) |
opencv_core.MatExpr |
opencv_core.MatExpr.c(opencv_core.Mat c) |
void |
opencv_cudaoptflow.DenseOpticalFlow.calc(opencv_core.Mat I0,
opencv_core.Mat I1,
opencv_core.Mat flow) |
void |
opencv_superres.DenseOpticalFlowExt.calc(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flow1) |
void |
opencv_video.DenseOpticalFlow.calc(opencv_core.Mat I0,
opencv_core.Mat I1,
opencv_core.Mat flow)
\brief Calculates an optical flow.
|
void |
opencv_superres.DenseOpticalFlowExt.calc(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flow1,
opencv_core.Mat flow2) |
void |
opencv_cudaoptflow.SparseOpticalFlow.calc(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status) |
void |
opencv_video.SparseOpticalFlow.calc(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status) |
void |
opencv_video.SparseOpticalFlow.calc(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err)
\brief Calculates a sparse optical flow.
|
void |
opencv_cudaoptflow.SparseOpticalFlow.calc(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err,
opencv_core.Stream stream)
\brief Calculates a sparse optical flow.
|
void |
opencv_cudaoptflow.DenseOpticalFlow.calc(opencv_core.Mat I0,
opencv_core.Mat I1,
opencv_core.Mat flow,
opencv_core.Stream stream)
\brief Calculates a dense optical flow.
|
static void |
opencv_cudaarithm.calcAbsSum(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.calcAbsSum(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\overload
|
static void |
opencv_imgproc.calcBackProject(opencv_core.GpuMatVector images,
int[] channels,
opencv_core.Mat hist,
opencv_core.Mat dst,
float[] ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.GpuMat hist,
opencv_core.GpuMat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.GpuMat hist,
opencv_core.GpuMat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.GpuMat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.GpuMat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
float[] ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
float[] ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.GpuMat hist,
opencv_core.GpuMat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.GpuMat hist,
opencv_core.GpuMat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.GpuMat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.GpuMat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
FloatBuffer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.GpuMat hist,
opencv_core.GpuMat backProject,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.GpuMat hist,
opencv_core.GpuMat backProject,
org.bytedeco.javacpp.FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
org.bytedeco.javacpp.FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat backProject,
org.bytedeco.javacpp.PointerPointer ranges,
double scale,
boolean uniform)
\brief Calculates the back projection of a histogram.
|
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.GpuMat backProject,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.GpuMat backProject,
org.bytedeco.javacpp.FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
org.bytedeco.javacpp.FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.Mat backProject,
org.bytedeco.javacpp.PointerPointer ranges,
double scale,
boolean uniform)
\overload
|
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.SparseMat hist,
opencv_core.UMat backProject,
org.bytedeco.javacpp.FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcBackProject(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.UMat hist,
opencv_core.UMat backProject,
org.bytedeco.javacpp.FloatPointer ranges,
double scale,
boolean uniform) |
static void |
opencv_imgproc.calcBackProject(opencv_core.MatVector images,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat hist,
opencv_core.Mat dst,
org.bytedeco.javacpp.FloatPointer ranges,
double scale)
\overload
|
static void |
opencv_imgproc.calcBackProject(opencv_core.UMatVector images,
IntBuffer channels,
opencv_core.Mat hist,
opencv_core.Mat dst,
FloatBuffer ranges,
double scale) |
static float |
opencv_videostab.calcBlurriness(opencv_core.Mat frame)
\addtogroup videostab
\{
|
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
int nsamples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags) |
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
int nsamples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags,
int ctype)
\brief Calculates the covariance matrix of a set of vectors.
|
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags) |
static void |
opencv_core.calcCovarMatrix(opencv_core.Mat samples,
opencv_core.Mat covar,
opencv_core.Mat mean,
int flags,
int ctype)
\overload
\note use #COVAR_ROWS or #COVAR_COLS flag
|
float |
opencv_ml.StatModel.calcError(opencv_ml.TrainData data,
boolean test,
opencv_core.Mat resp)
\brief Computes error on the training or test dataset
|
static void |
opencv_videostab.calcFlowMask(opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat errors,
float maxError,
opencv_core.Mat mask0,
opencv_core.Mat mask1,
opencv_core.Mat flowMask) |
static double |
opencv_optflow.calcGlobalOrientation(opencv_core.Mat orientation,
opencv_core.Mat mask,
opencv_core.Mat mhi,
double timestamp,
double duration)
\brief Calculates a global motion orientation in a selected region.
|
static void |
opencv_imgproc.calcHist(opencv_core.GpuMatVector images,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.GpuMatVector images,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int[] histSize,
float[] ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.GpuMat mask,
opencv_core.GpuMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.GpuMat mask,
opencv_core.GpuMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.GpuMat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.GpuMat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
int[] histSize,
float[] ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
int[] channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
int[] histSize,
float[] ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.GpuMat mask,
opencv_core.GpuMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.GpuMat mask,
opencv_core.GpuMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.GpuMat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.GpuMat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
IntBuffer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
IntBuffer histSize,
FloatBuffer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.GpuMat mask,
opencv_core.GpuMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.GpuMat mask,
opencv_core.GpuMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.GpuMat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.GpuMat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.PointerPointer ranges,
boolean uniform,
boolean accumulate)
\brief Calculates a histogram of a set of arrays.
|
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.PointerPointer ranges,
boolean uniform,
boolean accumulate)
\overload
|
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.UMat mask,
opencv_core.SparseMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.Mat images,
int nimages,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.UMat mask,
opencv_core.UMat hist,
int dims,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean uniform,
boolean accumulate) |
static void |
opencv_cudaimgproc.calcHist(opencv_core.Mat src,
opencv_core.Mat hist) |
static void |
opencv_cudaimgproc.calcHist(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat hist) |
static void |
opencv_cudaimgproc.calcHist(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat hist,
opencv_core.Stream stream)
\brief Calculates histogram for one channel 8-bit image confined in given mask.
|
static void |
opencv_cudaimgproc.calcHist(opencv_core.Mat src,
opencv_core.Mat hist,
opencv_core.Stream stream)
\} cudaimgproc_color
|
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.MatVector images,
org.bytedeco.javacpp.IntPointer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.FloatPointer ranges,
boolean accumulate)
\overload
|
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
IntBuffer histSize,
FloatBuffer ranges) |
static void |
opencv_imgproc.calcHist(opencv_core.UMatVector images,
IntBuffer channels,
opencv_core.Mat mask,
opencv_core.Mat hist,
IntBuffer histSize,
FloatBuffer ranges,
boolean accumulate) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.calcLocalSVDValues(opencv_core.GpuMat localSVDValues,
opencv_core.Mat frame) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.calcLocalSVDValues(opencv_core.Mat localSVDValues,
opencv_core.Mat frame) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.calcLocalSVDValues(opencv_core.UMat localSVDValues,
opencv_core.Mat frame) |
static void |
opencv_optflow.calcMotionGradient(opencv_core.Mat mhi,
opencv_core.Mat mask,
opencv_core.Mat orientation,
double delta1,
double delta2) |
static void |
opencv_optflow.calcMotionGradient(opencv_core.Mat mhi,
opencv_core.Mat mask,
opencv_core.Mat orientation,
double delta1,
double delta2,
int apertureSize)
\brief Calculates a gradient orientation of a motion history image.
|
static void |
opencv_cudaarithm.calcNorm(opencv_core.Mat src,
opencv_core.Mat dst,
int normType) |
static void |
opencv_cudaarithm.calcNorm(opencv_core.Mat src,
opencv_core.Mat dst,
int normType,
opencv_core.Mat mask,
opencv_core.Stream stream)
\overload
|
static void |
opencv_cudaarithm.calcNormDiff(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.calcNormDiff(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int normType,
opencv_core.Stream stream)
\overload
|
static void |
opencv_video.calcOpticalFlowFarneback(opencv_core.Mat prev,
opencv_core.Mat next,
opencv_core.Mat flow,
double pyr_scale,
int levels,
int winsize,
int iterations,
int poly_n,
double poly_sigma,
int flags)
\brief Computes a dense optical flow using the Gunnar Farneback's algorithm.
|
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err) |
static void |
opencv_video.calcOpticalFlowPyrLK(opencv_core.Mat prevImg,
opencv_core.Mat nextImg,
opencv_core.Mat prevPts,
opencv_core.Mat nextPts,
opencv_core.Mat status,
opencv_core.Mat err,
opencv_core.Size winSize,
int maxLevel,
opencv_core.TermCriteria criteria,
int flags,
double minEigThreshold)
\brief Calculates an optical flow for a sparse feature set using the iterative Lucas-Kanade method with
pyramids.
|
static void |
opencv_optflow.calcOpticalFlowSF(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int layers,
int averaging_block_size,
int max_flow)
\addtogroup optflow
\{
|
static void |
opencv_optflow.calcOpticalFlowSF(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int layers,
int averaging_block_size,
int max_flow,
double sigma_dist,
double sigma_color,
int postprocess_window,
double sigma_dist_fix,
double sigma_color_fix,
double occ_thr,
int upscale_averaging_radius,
double upscale_sigma_dist,
double upscale_sigma_color,
double speed_up_thr)
\brief Calculates an optical flow using the "SimpleFlow" algorithm.
|
static void |
opencv_optflow.calcOpticalFlowSparseToDense(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow) |
static void |
opencv_optflow.calcOpticalFlowSparseToDense(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat flow,
int grid_step,
int k,
float sigma,
boolean use_post_proc,
float fgs_lambda,
float fgs_sigma)
\brief Fast dense optical flow based on PyrLK sparse matches interpolation.
|
static void |
opencv_cudaarithm.calcSqrSum(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.calcSqrSum(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\overload
|
static void |
opencv_cudaarithm.calcSum(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.calcSum(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\overload
|
opencv_core.Point |
opencv_photo.AlignMTB.calculateShift(opencv_core.Mat img0,
opencv_core.Mat img1)
\brief Calculates shift between two images, i.e. how the second image should be shifted to align with the first.
|
void |
opencv_optflow.VariationalRefinement.calcUV(opencv_core.Mat I0,
opencv_core.Mat I1,
opencv_core.Mat flow_u,
opencv_core.Mat flow_v)
\brief \ref calc function overload to handle separate horizontal (u) and vertical (v) flow components
(to avoid extra splits/merges)
|
static double |
opencv_calib3d.calibrate(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
\brief Performs camera calibration
|
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size image_size,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
\overload double calibrateCamera( InputArrayOfArrays objectPoints,
InputArrayOfArrays imagePoints, Size imageSize,
InputOutputArray cameraMatrix, InputOutputArray distCoeffs,
OutputArrayOfArrays rvecs, OutputArrayOfArrays tvecs,
OutputArray stdDeviations, OutputArray perViewErrors,
int flags = 0, TermCriteria criteria = TermCriteria(
TermCriteria::COUNT + TermCriteria::EPS, 30, DBL_EPSILON) )
|
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraAruco(opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraAruco(opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraAruco(opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraAruco(opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
\brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
|
static double |
opencv_aruco.calibrateCameraAruco(opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraAruco(opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraArucoExtended(opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraArucoExtended(opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraArucoExtended(opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraArucoExtended(opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria)
\brief Calibrate a camera using aruco markers
|
static double |
opencv_aruco.calibrateCameraArucoExtended(opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraArucoExtended(opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_core.Mat counter,
opencv_aruco.Board board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharuco(opencv_core.GpuMatVector charucoCorners,
opencv_core.GpuMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraCharuco(opencv_core.GpuMatVector charucoCorners,
opencv_core.GpuMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharuco(opencv_core.MatVector charucoCorners,
opencv_core.MatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraCharuco(opencv_core.MatVector charucoCorners,
opencv_core.MatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags,
opencv_core.TermCriteria criteria)
\brief It's the same function as #calibrateCameraCharuco but without calibration error estimation.
|
static double |
opencv_aruco.calibrateCameraCharuco(opencv_core.UMatVector charucoCorners,
opencv_core.UMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraCharuco(opencv_core.UMatVector charucoCorners,
opencv_core.UMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(opencv_core.GpuMatVector charucoCorners,
opencv_core.GpuMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(opencv_core.GpuMatVector charucoCorners,
opencv_core.GpuMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(opencv_core.MatVector charucoCorners,
opencv_core.MatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(opencv_core.MatVector charucoCorners,
opencv_core.MatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria)
\brief Calibrate a camera using Charuco corners
|
static double |
opencv_aruco.calibrateCameraCharucoExtended(opencv_core.UMatVector charucoCorners,
opencv_core.UMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(opencv_core.UMatVector charucoCorners,
opencv_core.UMatVector charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria)
\brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints,
opencv_core.Size imageSize,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
opencv_core.Mat stdDeviationsIntrinsics,
opencv_core.Mat stdDeviationsExtrinsics,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static boolean |
opencv_stitching.calibrateRotatingCamera(opencv_core.MatVector Hs,
opencv_core.Mat K) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
double[] fovx,
double[] fovy,
double[] focalLength,
opencv_core.Point2d principalPoint,
double[] aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
DoubleBuffer fovx,
DoubleBuffer fovy,
DoubleBuffer focalLength,
opencv_core.Point2d principalPoint,
DoubleBuffer aspectRatio) |
static void |
opencv_calib3d.calibrationMatrixValues(opencv_core.Mat cameraMatrix,
opencv_core.Size imageSize,
double apertureWidth,
double apertureHeight,
org.bytedeco.javacpp.DoublePointer fovx,
org.bytedeco.javacpp.DoublePointer fovy,
org.bytedeco.javacpp.DoublePointer focalLength,
opencv_core.Point2d principalPoint,
org.bytedeco.javacpp.DoublePointer aspectRatio)
\brief Computes useful camera characteristics from the camera matrix.
|
void |
opencv_tracking.TrackerKCF.Arg0_Mat_Rect_Mat.call(opencv_core.Mat arg0,
opencv_core.Rect arg1,
opencv_core.Mat arg2) |
static opencv_core.RotatedRect |
opencv_video.CamShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
\brief Finds an object center, size, and orientation.
|
static void |
opencv_imgproc.Canny(opencv_core.Mat image,
opencv_core.Mat edges,
double threshold1,
double threshold2) |
static void |
opencv_imgproc.Canny(opencv_core.Mat image,
opencv_core.Mat edges,
double threshold1,
double threshold2,
int apertureSize,
boolean L2gradient)
\brief Finds edges in an image using the Canny algorithm \cite Canny86 .
|
static void |
opencv_imgproc.Canny(opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat edges,
double threshold1,
double threshold2) |
static void |
opencv_imgproc.Canny(opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat edges,
double threshold1,
double threshold2,
boolean L2gradient)
\overload
|
static void |
opencv_core.cartToPolar(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Mat angle) |
static void |
opencv_cudaarithm.cartToPolar(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Mat angle) |
static void |
opencv_core.cartToPolar(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Mat angle,
boolean angleInDegrees)
\brief Calculates the magnitude and angle of 2D vectors.
|
static void |
opencv_cudaarithm.cartToPolar(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Mat angle,
boolean angleInDegrees,
opencv_core.Stream stream)
\brief Converts Cartesian coordinates into polar.
|
static int |
opencv_core.checkOptimalVectorWidth(int[] vectorWidths,
opencv_core.Mat src1) |
static int |
opencv_core.checkOptimalVectorWidth(int[] vectorWidths,
opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat src3,
opencv_core.Mat src4,
opencv_core.Mat src5,
opencv_core.Mat src6,
opencv_core.Mat src7,
opencv_core.Mat src8,
opencv_core.Mat src9,
int strat) |
static int |
opencv_core.checkOptimalVectorWidth(IntBuffer vectorWidths,
opencv_core.Mat src1) |
static int |
opencv_core.checkOptimalVectorWidth(IntBuffer vectorWidths,
opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat src3,
opencv_core.Mat src4,
opencv_core.Mat src5,
opencv_core.Mat src6,
opencv_core.Mat src7,
opencv_core.Mat src8,
opencv_core.Mat src9,
int strat) |
static int |
opencv_core.checkOptimalVectorWidth(org.bytedeco.javacpp.IntPointer vectorWidths,
opencv_core.Mat src1) |
static int |
opencv_core.checkOptimalVectorWidth(org.bytedeco.javacpp.IntPointer vectorWidths,
opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat src3,
opencv_core.Mat src4,
opencv_core.Mat src5,
opencv_core.Mat src6,
opencv_core.Mat src7,
opencv_core.Mat src8,
opencv_core.Mat src9,
int strat) |
static boolean |
opencv_core.checkRange(opencv_core.Mat a) |
static boolean |
opencv_core.checkRange(opencv_core.Mat a,
boolean quiet,
opencv_core.Point pos,
double minVal,
double maxVal)
\brief Checks every element of an input array for invalid values.
|
static void |
opencv_imgproc.circle(opencv_core.Mat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color) |
static void |
opencv_imgproc.circle(opencv_core.Mat img,
opencv_core.Point center,
int radius,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws a circle.
|
org.bytedeco.javacpp.FloatPointer |
opencv_tracking.ClfMilBoost.classify(opencv_core.Mat x) |
org.bytedeco.javacpp.FloatPointer |
opencv_tracking.ClfMilBoost.classify(opencv_core.Mat x,
boolean logR) |
boolean |
opencv_tracking.ClfOnlineStump.classify(opencv_core.Mat x,
int i) |
float |
opencv_tracking.ClfOnlineStump.classifyF(opencv_core.Mat x,
int i) |
org.bytedeco.javacpp.FloatPointer |
opencv_tracking.ClfOnlineStump.classifySetF(opencv_core.Mat x) |
opencv_core.Mat |
opencv_features2d.BOWTrainer.cluster(opencv_core.Mat descriptors)
\brief Clusters train descriptors.
|
opencv_core.Mat |
opencv_features2d.BOWKMeansTrainer.cluster(opencv_core.Mat descriptors) |
static void |
opencv_photo.colorChange(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst) |
static void |
opencv_photo.colorChange(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst,
float red_mul,
float green_mul,
float blue_mul)
\brief Given an original color image, two differently colored versions of this image can be mixed
seamlessly.
|
static void |
opencv_img_hash.colorMomentHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr)
\brief Computes the color moment hash of the input; the algorithm
comes from the paper "Perceptual Hashing for Color Images
Using Invariant Moments"
|
double |
opencv_img_hash.ImgHashBase.compare(opencv_core.Mat hashOne,
opencv_core.Mat hashTwo)
\brief Compares the hash values hashOne and hashTwo
|
static void |
opencv_core.compare(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int cmpop)
\brief Performs the per-element comparison of two arrays or an array and scalar value.
|
static void |
opencv_cudaarithm.compare(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int cmpop) |
static void |
opencv_cudaarithm.compare(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int cmpop,
opencv_core.Stream stream)
\brief Compares elements of two matrices (or of a matrix and scalar).
|
static double |
opencv_imgproc.compareHist(opencv_core.Mat H1,
opencv_core.Mat H2,
int method)
\brief Compares two histograms.
|
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.Mat lines1,
opencv_core.Mat lines2) |
int |
opencv_imgproc.LineSegmentDetector.compareSegments(opencv_core.Size size,
opencv_core.Mat lines1,
opencv_core.Mat lines2,
opencv_core.Mat _image)
\brief Draws two groups of lines in blue and red, counting the non-overlapping (mismatching) pixels.
|
static void |
opencv_videostab.completeFrameAccordingToFlow(opencv_core.Mat flowMask,
opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat frame1,
opencv_core.Mat mask1,
float distThresh,
opencv_core.Mat frame0,
opencv_core.Mat mask0) |
static void |
opencv_core.completeSymm(opencv_core.Mat m) |
static void |
opencv_core.completeSymm(opencv_core.Mat m,
boolean lowerToUpper)
\brief Copies the lower or the upper half of a square matrix to its other half.
|
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.GpuMatVector images,
opencv_core.Mat pano) |
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.Mat pano)
\overload
|
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.MatVector images,
opencv_core.Mat pano)
\brief These functions try to compose the given images (or images stored internally from the other function
calls) into the final pano under the assumption that the image transformations were estimated
before.
|
int |
opencv_stitching.Stitcher.composePanorama(opencv_core.UMatVector images,
opencv_core.Mat pano) |
static void |
opencv_calib3d.composeRT(opencv_core.Mat rvec1,
opencv_core.Mat tvec1,
opencv_core.Mat rvec2,
opencv_core.Mat tvec2,
opencv_core.Mat rvec3,
opencv_core.Mat tvec3) |
static void |
opencv_calib3d.composeRT(opencv_core.Mat rvec1,
opencv_core.Mat tvec1,
opencv_core.Mat rvec2,
opencv_core.Mat tvec2,
opencv_core.Mat rvec3,
opencv_core.Mat tvec3,
opencv_core.Mat dr3dr1,
opencv_core.Mat dr3dt1,
opencv_core.Mat dr3dr2,
opencv_core.Mat dr3dt2,
opencv_core.Mat dt3dr1,
opencv_core.Mat dt3dt1,
opencv_core.Mat dt3dr2,
opencv_core.Mat dt3dt2)
\brief Combines two rotation-and-shift transformations.
|
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.GpuMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.GpuMat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters,
opencv_core.Mat descriptors) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.compute(opencv_core.GpuMat desc,
opencv_core.Mat frame,
opencv_core.Point LSBPSamplePoints) |
void |
opencv_core.LDA.compute(opencv_core.GpuMatVector src,
opencv_core.Mat labels) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
float[] descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
float[] descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
FloatBuffer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
org.bytedeco.javacpp.FloatPointer descriptors) |
void |
opencv_objdetect.HOGDescriptor.compute(opencv_core.Mat img,
org.bytedeco.javacpp.FloatPointer descriptors,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector locations)
\brief Computes HOG descriptors of given image.
|
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat descriptors)
\overload
|
void |
opencv_features2d.Feature2D.compute(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat descriptors)
\brief Computes the descriptors for a set of keypoints detected in an image (first variant) or image set
(second variant).
|
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat imgDescriptor) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters,
opencv_core.Mat descriptors)
\brief Computes an image descriptor using the set visual vocabulary.
|
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.Mat image,
opencv_core.Mat descriptors)
\overload
|
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w) |
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.Mat keypointDescriptors,
opencv_core.Mat imgDescriptor) |
void |
opencv_cudaobjdetect.HOG.compute(opencv_core.Mat img,
opencv_core.Mat descriptors) |
void |
opencv_img_hash.ImgHashBase.compute(opencv_core.Mat inputArr,
opencv_core.Mat outputArr)
\brief Computes hash of the input image
|
void |
opencv_cudaarithm.DFT.compute(opencv_core.Mat image,
opencv_core.Mat result) |
void |
opencv_cudaimgproc.CornernessCriteria.compute(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w,
int flags)
\overload
computes singular values of a matrix
|
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.Mat keypointDescriptors,
opencv_core.Mat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters)
\overload
|
void |
opencv_calib3d.StereoMatcher.compute(opencv_core.Mat left,
opencv_core.Mat right,
opencv_core.Mat disparity)
\brief Computes disparity map for the specified stereo pair
|
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt) |
static void |
opencv_core.SVD.compute(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
int flags)
\brief decomposes matrix and stores the results to user-provided matrices
|
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.compute(opencv_core.Mat desc,
opencv_core.Mat frame,
opencv_core.Point LSBPSamplePoints) |
void |
opencv_cudaobjdetect.HOG.compute(opencv_core.Mat img,
opencv_core.Mat descriptors,
opencv_core.Stream stream)
\brief Returns block descriptors computed for the whole image.
|
void |
opencv_cudaarithm.DFT.compute(opencv_core.Mat image,
opencv_core.Mat result,
opencv_core.Stream stream)
\brief Computes an FFT of a given image.
|
void |
opencv_cudaimgproc.CornernessCriteria.compute(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes the cornerness criteria at each image pixel.
|
void |
opencv_xfeatures2d.DAISY.compute(opencv_core.Mat image,
opencv_core.Rect roi,
opencv_core.Mat descriptors)
\overload
|
void |
opencv_core.LDA.compute(opencv_core.MatVector src,
opencv_core.Mat labels)
Compute the discriminants for data in src (row aligned) and labels.
|
void |
opencv_tracking.TrackerFeature.compute(opencv_core.MatVector images,
opencv_core.Mat response)
\brief Compute the features in the images collection
|
void |
opencv_features2d.BOWImgDescriptorExtractor.compute(opencv_core.UMat image,
opencv_core.KeyPointVector keypoints,
opencv_core.UMat imgDescriptor,
opencv_core.IntVectorVector pointIdxsOfClusters,
opencv_core.Mat descriptors) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.compute(opencv_core.UMat desc,
opencv_core.Mat frame,
opencv_core.Point LSBPSamplePoints) |
void |
opencv_core.LDA.compute(opencv_core.UMatVector src,
opencv_core.Mat labels) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI) |
static double |
opencv_ximgproc.computeBadPixelPercent(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI,
int thresh)
\brief Function for computing the percent of "bad" pixels in the disparity map
(pixels where error is higher than a specified threshold)
|
boolean |
opencv_saliency.StaticSaliency.computeBinaryMap(opencv_core.Mat _saliencyMap,
opencv_core.Mat _binaryMap)
\brief This function computes a binary map from the given saliency map.
|
void |
opencv_photo.AlignMTB.computeBitmaps(opencv_core.Mat img,
opencv_core.Mat tb,
opencv_core.Mat eb)
\brief Computes median threshold and exclude bitmaps of given image.
|
static void |
opencv_calib3d.computeCorrespondEpilines(opencv_core.Mat points,
int whichImage,
opencv_core.Mat F,
opencv_core.Mat lines)
\brief For points in an image of a stereo pair, computes the corresponding epilines in the other image.
|
void |
opencv_structured_light.SinusoidalPattern.computeDataModulationTerm(opencv_core.GpuMatVector patternImages,
opencv_core.Mat dataModulationTerm,
opencv_core.Mat shadowMask) |
void |
opencv_structured_light.SinusoidalPattern.computeDataModulationTerm(opencv_core.MatVector patternImages,
opencv_core.Mat dataModulationTerm,
opencv_core.Mat shadowMask)
\brief compute the data modulation term.
|
void |
opencv_structured_light.SinusoidalPattern.computeDataModulationTerm(opencv_core.UMatVector patternImages,
opencv_core.Mat dataModulationTerm,
opencv_core.Mat shadowMask) |
float |
opencv_shape.ShapeDistanceExtractor.computeDistance(opencv_core.Mat contour1,
opencv_core.Mat contour2)
\brief Compute the shape distance between two shapes defined by its contours.
|
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.computeFromLocalSVDValues(opencv_core.GpuMat desc,
opencv_core.Mat localSVDValues,
opencv_core.Point LSBPSamplePoints) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.computeFromLocalSVDValues(opencv_core.Mat desc,
opencv_core.Mat localSVDValues,
opencv_core.Point LSBPSamplePoints) |
static void |
opencv_bgsegm.BackgroundSubtractorLSBPDesc.computeFromLocalSVDValues(opencv_core.UMat desc,
opencv_core.Mat localSVDValues,
opencv_core.Point LSBPSamplePoints) |
void |
opencv_objdetect.HOGDescriptor.computeGradient(opencv_core.Mat img,
opencv_core.Mat grad,
opencv_core.Mat angleOfs) |
void |
opencv_objdetect.HOGDescriptor.computeGradient(opencv_core.Mat img,
opencv_core.Mat grad,
opencv_core.Mat angleOfs,
opencv_core.Size paddingTL,
opencv_core.Size paddingBR)
\brief Computes gradients and quantized gradient orientations.
|
static double |
opencv_ximgproc.computeMSE(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI)
\brief Function for computing mean square error for disparity maps
|
static void |
opencv_text.computeNMChannels(opencv_core.Mat _src,
opencv_core.GpuMatVector _channels) |
static void |
opencv_text.computeNMChannels(opencv_core.Mat _src,
opencv_core.GpuMatVector _channels,
int _mode) |
static void |
opencv_text.computeNMChannels(opencv_core.Mat _src,
opencv_core.MatVector _channels) |
static void |
opencv_text.computeNMChannels(opencv_core.Mat _src,
opencv_core.MatVector _channels,
int _mode)
\brief Compute the different channels to be processed independently in the N&M algorithm \cite Neumann12.
|
static void |
opencv_text.computeNMChannels(opencv_core.Mat _src,
opencv_core.UMatVector _channels) |
static void |
opencv_text.computeNMChannels(opencv_core.Mat _src,
opencv_core.UMatVector _channels,
int _mode) |
void |
opencv_ximgproc.StructuredEdgeDetection.computeOrientation(opencv_core.Mat _src,
opencv_core.Mat _dst)
\brief The function computes orientation from edge image.
|
void |
opencv_structured_light.SinusoidalPattern.computePhaseMap(opencv_core.GpuMatVector patternImages,
opencv_core.Mat wrappedPhaseMap) |
void |
opencv_structured_light.SinusoidalPattern.computePhaseMap(opencv_core.GpuMatVector patternImages,
opencv_core.Mat wrappedPhaseMap,
opencv_core.Mat shadowMask,
opencv_core.Mat fundamental) |
void |
opencv_structured_light.SinusoidalPattern.computePhaseMap(opencv_core.MatVector patternImages,
opencv_core.Mat wrappedPhaseMap) |
void |
opencv_structured_light.SinusoidalPattern.computePhaseMap(opencv_core.MatVector patternImages,
opencv_core.Mat wrappedPhaseMap,
opencv_core.Mat shadowMask,
opencv_core.Mat fundamental)
\brief Compute a wrapped phase map from sinusoidal patterns.
|
void |
opencv_structured_light.SinusoidalPattern.computePhaseMap(opencv_core.UMatVector patternImages,
opencv_core.Mat wrappedPhaseMap) |
void |
opencv_structured_light.SinusoidalPattern.computePhaseMap(opencv_core.UMatVector patternImages,
opencv_core.Mat wrappedPhaseMap,
opencv_core.Mat shadowMask,
opencv_core.Mat fundamental) |
float |
opencv_xfeatures2d.PCTSignaturesSQFD.computeQuadraticFormDistance(opencv_core.Mat _signature0,
opencv_core.Mat _signature1)
\brief Computes Signature Quadratic Form Distance of two signatures.
|
void |
opencv_xfeatures2d.PCTSignaturesSQFD.computeQuadraticFormDistances(opencv_core.Mat sourceSignature,
opencv_core.MatVector imageSignatures,
float[] distances) |
void |
opencv_xfeatures2d.PCTSignaturesSQFD.computeQuadraticFormDistances(opencv_core.Mat sourceSignature,
opencv_core.MatVector imageSignatures,
FloatBuffer distances) |
void |
opencv_xfeatures2d.PCTSignaturesSQFD.computeQuadraticFormDistances(opencv_core.Mat sourceSignature,
opencv_core.MatVector imageSignatures,
org.bytedeco.javacpp.FloatPointer distances)
\brief Computes Signature Quadratic Form Distance between the reference signature
and each of the other image signatures.
|
boolean |
opencv_saliency.Saliency.computeSaliency(opencv_core.Mat image,
opencv_core.Mat saliencyMap)
\brief Compute the saliency
|
boolean |
opencv_saliency.StaticSaliencySpectralResidual.computeSaliency(opencv_core.Mat image,
opencv_core.Mat saliencyMap) |
boolean |
opencv_saliency.StaticSaliencyFineGrained.computeSaliency(opencv_core.Mat image,
opencv_core.Mat saliencyMap) |
boolean |
opencv_saliency.MotionSaliencyBinWangApr2014.computeSaliency(opencv_core.Mat image,
opencv_core.Mat saliencyMap) |
boolean |
opencv_saliency.ObjectnessBING.computeSaliency(opencv_core.Mat image,
opencv_core.Mat saliencyMap) |
void |
opencv_xfeatures2d.PCTSignatures.computeSignature(opencv_core.Mat image,
opencv_core.Mat signature)
\brief Computes signature of given image.
|
static int |
opencv_imgproc.connectedComponents(opencv_core.Mat image,
opencv_core.Mat labels) |
static int |
opencv_imgproc.connectedComponents(opencv_core.Mat image,
opencv_core.Mat labels,
int connectivity,
int ltype)
\overload
|
static int |
opencv_imgproc.connectedComponentsWithAlgorithm(opencv_core.Mat image,
opencv_core.Mat labels,
int connectivity,
int ltype,
int ccltype)
\brief computes the connected components labeled image of a boolean image
|
static int |
opencv_imgproc.connectedComponentsWithStats(opencv_core.Mat image,
opencv_core.Mat labels,
opencv_core.Mat stats,
opencv_core.Mat centroids) |
static int |
opencv_imgproc.connectedComponentsWithStats(opencv_core.Mat image,
opencv_core.Mat labels,
opencv_core.Mat stats,
opencv_core.Mat centroids,
int connectivity,
int ltype)
\overload
|
static int |
opencv_imgproc.connectedComponentsWithStatsWithAlgorithm(opencv_core.Mat image,
opencv_core.Mat labels,
opencv_core.Mat stats,
opencv_core.Mat centroids,
int connectivity,
int ltype,
int ccltype)
\brief computes the connected components labeled image of a boolean image and also produces a statistics output for each label
|
static opencv_core.KernelArg |
opencv_core.KernelArg.Constant(opencv_core.Mat m) |
static double |
opencv_imgproc.contourArea(opencv_core.Mat contour) |
static double |
opencv_imgproc.contourArea(opencv_core.Mat contour,
boolean oriented)
\brief Calculates a contour area.
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.controlMatrix(opencv_core.Mat controlMatrix) |
void |
opencv_cudaobjdetect.CudaCascadeClassifier.convert(opencv_core.Mat gpu_objects,
opencv_core.RectVector objects)
\brief Converts objects array from internal representation to standard vector.
|
static void |
opencv_core.convertFp16(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Converts an array to half precision floating number.
|
static void |
opencv_imgproc.convertMaps(opencv_core.Mat map1,
opencv_core.Mat map2,
opencv_core.Mat dstmap1,
opencv_core.Mat dstmap2,
int dstmap1type) |
static void |
opencv_imgproc.convertMaps(opencv_core.Mat map1,
opencv_core.Mat map2,
opencv_core.Mat dstmap1,
opencv_core.Mat dstmap2,
int dstmap1type,
boolean nninterpolation)
\brief Converts image transformation maps from one representation to another.
|
static void |
opencv_calib3d.convertPointsFromHomogeneous(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Converts points from homogeneous to Euclidean space.
|
static void |
opencv_calib3d.convertPointsHomogeneous(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Converts points to/from homogeneous coordinates.
|
static void |
opencv_calib3d.convertPointsToHomogeneous(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Converts points from Euclidean to homogeneous space.
|
static void |
opencv_core.convertScaleAbs(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.convertScaleAbs(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
double beta)
\brief Scales, calculates absolute values, and converts the result to 8-bit.
|
void |
opencv_core.GpuMat.convertTo(opencv_core.Mat dst,
int rtype)
converts GpuMat to another datatype (Blocking call)
|
void |
opencv_core.Mat.convertTo(opencv_core.Mat m,
int rtype) |
void |
opencv_core.UMat.convertTo(opencv_core.Mat m,
int rtype) |
void |
opencv_core.SparseMat.convertTo(opencv_core.Mat m,
int rtype) |
void |
opencv_core.GpuMat.convertTo(opencv_core.Mat dst,
int rtype,
double alpha) |
void |
opencv_core.GpuMat.convertTo(opencv_core.Mat dst,
int rtype,
double alpha,
double beta)
converts GpuMat to another datatype with scaling (Blocking call)
|
void |
opencv_core.Mat.convertTo(opencv_core.Mat m,
int rtype,
double alpha,
double beta)
\brief Converts an array to another data type with optional scaling.
|
void |
opencv_core.UMat.convertTo(opencv_core.Mat m,
int rtype,
double alpha,
double beta)
converts matrix to another datatype with optional scaling.
|
void |
opencv_core.SparseMat.convertTo(opencv_core.Mat m,
int rtype,
double alpha,
double beta)
converts sparse matrix to dense n-dim matrix with optional type conversion and scaling.
|
void |
opencv_core.GpuMat.convertTo(opencv_core.Mat dst,
int rtype,
double alpha,
double beta,
opencv_core.Stream stream)
converts GpuMat to another datatype with scaling (Non-Blocking call)
|
void |
opencv_core.GpuMat.convertTo(opencv_core.Mat dst,
int rtype,
double alpha,
opencv_core.Stream stream)
converts GpuMat to another datatype with scaling (Non-Blocking call)
|
void |
opencv_core.GpuMat.convertTo(opencv_core.Mat dst,
int rtype,
opencv_core.Stream stream)
converts GpuMat to another datatype (Non-Blocking call)
|
static void |
opencv_imgproc.convexHull(opencv_core.Mat points,
opencv_core.Mat hull) |
static void |
opencv_imgproc.convexHull(opencv_core.Mat points,
opencv_core.Mat hull,
boolean clockwise,
boolean returnPoints)
\brief Finds the convex hull of a point set.
|
static void |
opencv_imgproc.convexityDefects(opencv_core.Mat contour,
opencv_core.Mat convexhull,
opencv_core.Mat convexityDefects)
\brief Finds the convexity defects of a contour.
|
void |
opencv_cudaarithm.Convolution.convolve(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result) |
void |
opencv_cudaarithm.Convolution.convolve(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result,
boolean ccorr,
opencv_core.Stream stream)
\brief Computes a convolution (or cross-correlation) of two images.
|
static void |
opencv_core.copyMakeBorder(opencv_core.Mat src,
opencv_core.Mat dst,
int top,
int bottom,
int left,
int right,
int borderType) |
static void |
opencv_cudaarithm.copyMakeBorder(opencv_core.Mat src,
opencv_core.Mat dst,
int top,
int bottom,
int left,
int right,
int borderType) |
static void |
opencv_core.copyMakeBorder(opencv_core.Mat src,
opencv_core.Mat dst,
int top,
int bottom,
int left,
int right,
int borderType,
opencv_core.Scalar value)
\brief Forms a border around an image.
|
static void |
opencv_cudaarithm.copyMakeBorder(opencv_core.Mat src,
opencv_core.Mat dst,
int top,
int bottom,
int left,
int right,
int borderType,
opencv_core.Scalar value,
opencv_core.Stream stream)
\brief Forms a border around an image.
|
void |
opencv_core.Mat.copySize(opencv_core.Mat m)
internal use function; properly re-allocates _size, _step arrays
|
void |
opencv_core.GpuMat.copyTo(opencv_core.Mat dst)
copies the GpuMat content to device memory (Blocking call)
|
void |
opencv_core.Mat.copyTo(opencv_core.Mat m)
\brief Copies the matrix to another one.
|
void |
opencv_core.UMat.copyTo(opencv_core.Mat m)
copies the matrix content to "m".
|
void |
opencv_core.SparseMat.copyTo(opencv_core.Mat m)
converts sparse matrix to dense matrix.
|
void |
opencv_core.GpuMat.copyTo(opencv_core.Mat dst,
opencv_core.Mat mask)
copies those GpuMat elements to "m" that are marked with non-zero mask elements (Blocking call)
|
void |
opencv_core.Mat.copyTo(opencv_core.Mat m,
opencv_core.Mat mask)
\overload
|
void |
opencv_core.UMat.copyTo(opencv_core.Mat m,
opencv_core.Mat mask)
copies those matrix elements to "m" that are marked with non-zero mask elements.
|
void |
opencv_core.GpuMat.copyTo(opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
copies those GpuMat elements to "m" that are marked with non-zero mask elements (Non-Blocking call)
|
void |
opencv_core.GpuMat.copyTo(opencv_core.Mat dst,
opencv_core.Stream stream)
copies the GpuMat content to device memory (Non-Blocking call)
|
static void |
opencv_imgproc.cornerEigenValsAndVecs(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize) |
static void |
opencv_imgproc.cornerEigenValsAndVecs(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
int borderType)
\brief Calculates eigenvalues and eigenvectors of image blocks for corner detection.
|
static void |
opencv_imgproc.cornerHarris(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
double k) |
static void |
opencv_imgproc.cornerHarris(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
double k,
int borderType)
\brief Harris corner detector.
|
static void |
opencv_imgproc.cornerMinEigenVal(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize) |
static void |
opencv_imgproc.cornerMinEigenVal(opencv_core.Mat src,
opencv_core.Mat dst,
int blockSize,
int ksize,
int borderType)
\brief Calculates the minimal eigenvalue of gradient matrices for corner detection.
|
static void |
opencv_imgproc.cornerSubPix(opencv_core.Mat image,
opencv_core.Mat corners,
opencv_core.Size winSize,
opencv_core.Size zeroZone,
opencv_core.TermCriteria criteria)
\brief Refines the corner locations.
|
opencv_core.Mat |
opencv_video.KalmanFilter.correct(opencv_core.Mat measurement)
\brief Updates the predicted state from the measurement.
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.correct(opencv_core.Mat measurement)
The function performs correction step of the algorithm
|
static void |
opencv_calib3d.correctMatches(opencv_core.Mat F,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat newPoints1,
opencv_core.Mat newPoints2)
\brief Refines coordinates of corresponding points.
|
static int |
opencv_core.countNonZero(opencv_core.Mat src)
\brief Counts non-zero array elements.
|
static int |
opencv_cudaarithm.countNonZero(opencv_core.Mat src)
\brief Counts non-zero matrix elements.
|
static void |
opencv_cudaarithm.countNonZero(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.countNonZero(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\overload
|
static void |
opencv_ximgproc.covarianceEstimation(opencv_core.Mat src,
opencv_core.Mat dst,
int windowRows,
int windowCols)
\brief Computes the estimated covariance matrix of an image using the sliding
window formulation.
|
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(org.bytedeco.javacpp.BytePointer filename,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(org.bytedeco.javacpp.BytePointer filename,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(org.bytedeco.javacpp.BytePointer filename,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int classifier)
\brief Creates an instance of the OCRHMMDecoder class.
|
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(org.bytedeco.javacpp.BytePointer filename,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size)
\brief Creates an instance of the OCRBeamSearchDecoder class.
|
static opencv_xfeatures2d.DAISY |
opencv_xfeatures2d.DAISY.create(float radius,
int q_radius,
int q_theta,
int q_hist,
int norm,
opencv_core.Mat H,
boolean interpolation,
boolean use_orientation) |
static opencv_aruco.Board |
opencv_aruco.Board.create(opencv_core.GpuMatVector objPoints,
opencv_aruco.Dictionary dictionary,
opencv_core.Mat ids) |
static opencv_plot.Plot2d |
opencv_plot.Plot2d.create(opencv_core.Mat data)
\brief Creates Plot2d object
|
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses) |
static opencv_ml.TrainData |
opencv_ml.TrainData.create(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses,
opencv_core.Mat varIdx,
opencv_core.Mat sampleIdx,
opencv_core.Mat sampleWeights,
opencv_core.Mat varType)
\brief Creates training data from in-memory arrays.
|
static opencv_plot.Plot2d |
opencv_plot.Plot2d.create(opencv_core.Mat dataX,
opencv_core.Mat dataY)
\brief Creates Plot2d object
|
static opencv_aruco.Board |
opencv_aruco.Board.create(opencv_core.MatVector objPoints,
opencv_aruco.Dictionary dictionary,
opencv_core.Mat ids)
\brief Provides a way to create a Board by passing the necessary data.
|
static opencv_core.DownhillSolver |
opencv_core.DownhillSolver.create(opencv_core.MinProblemSolver.Function f,
opencv_core.Mat initStep,
opencv_core.TermCriteria termcrit)
\brief This function returns the reference to the ready-to-use DownhillSolver object.
|
static opencv_aruco.Board |
opencv_aruco.Board.create(opencv_core.UMatVector objPoints,
opencv_aruco.Dictionary dictionary,
opencv_core.Mat ids) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size)
\brief Creates an instance of the OCRBeamSearchDecoder class.
|
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(opencv_text.OCRBeamSearchDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
org.bytedeco.javacpp.BytePointer vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode)
\brief Creates an instance of the OCRHMMDecoder class.
|
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(opencv_text.OCRHMMDecoder.ClassifierCallback classifier,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(String filename,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(String filename,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table) |
static opencv_text.OCRHMMDecoder |
opencv_text.OCRHMMDecoder.create(String filename,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int classifier) |
static opencv_text.OCRBeamSearchDecoder |
opencv_text.OCRBeamSearchDecoder.create(String filename,
String vocabulary,
opencv_core.Mat transition_probabilities_table,
opencv_core.Mat emission_probabilities_table,
int mode,
int beam_size) |
static void |
opencv_ml.createConcentricSpheresTestSet(int nsamples,
int nfeatures,
int nclasses,
opencv_core.Mat samples,
opencv_core.Mat responses)
\brief Creates test set
|
static void |
opencv_core.createContinuous(int rows,
int cols,
int type,
opencv_core.Mat arr)
\brief Creates a continuous matrix.
|
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.Mat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
opencv_ximgproc.createDTFilter(opencv_core.Mat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
\brief Factory method, create instance of DTFilter and produce initialization routines.
|
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.Mat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
opencv_ximgproc.createFastGlobalSmootherFilter(opencv_core.Mat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
\brief Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
|
static opencv_ximgproc.GuidedFilter |
opencv_ximgproc.createGuidedFilter(opencv_core.Mat guide,
int radius,
double eps)
\brief Factory method, create instance of GuidedFilter and produce initialization routines.
|
static void |
opencv_imgproc.createHanningWindow(opencv_core.Mat dst,
opencv_core.Size winSize,
int type)
\brief This function computes a Hanning window coefficients in two dimensions.
|
static void |
opencv_stitching.createLaplacePyr(opencv_core.Mat img,
int num_levels,
opencv_core.UMatVector pyr) |
static void |
opencv_stitching.createLaplacePyrGpu(opencv_core.Mat img,
int num_levels,
opencv_core.UMatVector pyr) |
static opencv_cudafilters.Filter |
opencv_cudafilters.createLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel) |
static opencv_cudafilters.Filter |
opencv_cudafilters.createLinearFilter(int srcType,
int dstType,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int borderMode,
opencv_core.Scalar borderVal)
\brief Creates a non-separable linear 2D filter.
|
static opencv_cudaarithm.LookUpTable |
opencv_cudaarithm.createLookUpTable(opencv_core.Mat lut)
\brief Creates implementation for cuda::LookUpTable .
|
static opencv_cudafilters.Filter |
opencv_cudafilters.createMorphologyFilter(int op,
int srcType,
opencv_core.Mat kernel) |
static opencv_cudafilters.Filter |
opencv_cudafilters.createMorphologyFilter(int op,
int srcType,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations)
\brief Creates a 2D morphological filter.
|
static void |
opencv_text.createOCRHMMTransitionsTable(org.bytedeco.javacpp.BytePointer vocabulary,
opencv_text.StdStringVector lexicon,
opencv_core.Mat transition_probabilities_table)
\}
|
static void |
opencv_text.createOCRHMMTransitionsTable(String vocabulary,
opencv_text.StdStringVector lexicon,
opencv_core.Mat transition_probabilities_table) |
static opencv_cudafilters.Filter |
opencv_cudafilters.createSeparableLinearFilter(int srcType,
int dstType,
opencv_core.Mat rowKernel,
opencv_core.Mat columnKernel) |
static opencv_cudafilters.Filter |
opencv_cudafilters.createSeparableLinearFilter(int srcType,
int dstType,
opencv_core.Mat rowKernel,
opencv_core.Mat columnKernel,
opencv_core.Point anchor,
int rowBorderMode,
int columnBorderMode)
\brief Creates a separable linear filter.
|
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.Mat image) |
static opencv_ximgproc.SuperpixelLSC |
opencv_ximgproc.createSuperpixelLSC(opencv_core.Mat image,
int region_size,
float ratio)
\brief Class implementing the LSC (Linear Spectral Clustering) superpixels
|
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.Mat image) |
static opencv_ximgproc.SuperpixelSLIC |
opencv_ximgproc.createSuperpixelSLIC(opencv_core.Mat image,
int algorithm,
int region_size,
float ruler)
\brief Initialize a SuperpixelSLIC object
|
static opencv_bgsegm.SyntheticSequenceGenerator |
opencv_bgsegm.createSyntheticSequenceGenerator(opencv_core.Mat background,
opencv_core.Mat object) |
static opencv_bgsegm.SyntheticSequenceGenerator |
opencv_bgsegm.createSyntheticSequenceGenerator(opencv_core.Mat background,
opencv_core.Mat object,
double amplitude,
double wavelength,
double wavespeed,
double objspeed)
\brief Creates an instance of SyntheticSequenceGenerator.
|
static void |
opencv_stitching.createWeightMap(opencv_core.Mat mask,
float sharpness,
opencv_core.Mat weight) |
opencv_core.Mat |
opencv_core.Mat.cross(opencv_core.Mat m)
\brief Computes a cross-product of two 3-element vectors.
|
opencv_core.Mat |
opencv_core.MatExpr.cross(opencv_core.Mat m) |
static opencv_core.CvMat |
opencv_core.cvMat(opencv_core.Mat m) |
static void |
opencv_cudaimgproc.cvtColor(opencv_core.Mat src,
opencv_core.Mat dst,
int code) |
static void |
opencv_imgproc.cvtColor(opencv_core.Mat src,
opencv_core.Mat dst,
int code) |
static void |
opencv_imgproc.cvtColor(opencv_core.Mat src,
opencv_core.Mat dst,
int code,
int dstCn)
\brief Converts an image from one color space to another.
|
static void |
opencv_cudaimgproc.cvtColor(opencv_core.Mat src,
opencv_core.Mat dst,
int code,
int dcn,
opencv_core.Stream stream)
\addtogroup cudaimgproc
\{
|
static void |
opencv_imgproc.cvtColorTwoPlane(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int code) |
static void |
opencv_core.dct(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.dct(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
\brief Performs a forward or inverse discrete Cosine transform of 1D or 2D array.
|
static void |
opencv_xphoto.dctDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
double sigma) |
static void |
opencv_xphoto.dctDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
double sigma,
int psize)
\addtogroup xphoto
\{
|
void |
opencv_videostab.DeblurerBase.deblur(int idx,
opencv_core.Mat frame) |
void |
opencv_videostab.NullDeblurer.deblur(int arg0,
opencv_core.Mat arg1) |
void |
opencv_videostab.WeightingDeblurer.deblur(int idx,
opencv_core.Mat frame) |
boolean |
opencv_structured_light.StructuredLightPattern.decode(opencv_core.MatVectorVector patternImages,
opencv_core.Mat disparityMap) |
boolean |
opencv_structured_light.StructuredLightPattern.decode(opencv_core.MatVectorVector patternImages,
opencv_core.Mat disparityMap,
opencv_core.GpuMatVector blackImages,
opencv_core.GpuMatVector whiteImages,
int flags) |
boolean |
opencv_structured_light.StructuredLightPattern.decode(opencv_core.MatVectorVector patternImages,
opencv_core.Mat disparityMap,
opencv_core.MatVector blackImages,
opencv_core.MatVector whiteImages,
int flags)
\brief Decodes the structured light pattern, generating a disparity map
|
boolean |
opencv_structured_light.StructuredLightPattern.decode(opencv_core.MatVectorVector patternImages,
opencv_core.Mat disparityMap,
opencv_core.UMatVector blackImages,
opencv_core.UMatVector whiteImages,
int flags) |
static void |
opencv_photo.decolor(opencv_core.Mat src,
opencv_core.Mat grayscale,
opencv_core.Mat color_boost)
\} photo_hdr
|
static void |
opencv_calib3d.decomposeEssentialMat(opencv_core.Mat E,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat t)
\brief Decompose an essential matrix to possible rotations and translation.
|
static int |
opencv_calib3d.decomposeHomographyMat(opencv_core.Mat H,
opencv_core.Mat K,
opencv_core.GpuMatVector rotations,
opencv_core.GpuMatVector translations,
opencv_core.GpuMatVector normals) |
static int |
opencv_calib3d.decomposeHomographyMat(opencv_core.Mat H,
opencv_core.Mat K,
opencv_core.MatVector rotations,
opencv_core.MatVector translations,
opencv_core.MatVector normals)
\brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
|
static int |
opencv_calib3d.decomposeHomographyMat(opencv_core.Mat H,
opencv_core.Mat K,
opencv_core.UMatVector rotations,
opencv_core.UMatVector translations,
opencv_core.UMatVector normals) |
static void |
opencv_calib3d.decomposeProjectionMatrix(opencv_core.Mat projMatrix,
opencv_core.Mat cameraMatrix,
opencv_core.Mat rotMatrix,
opencv_core.Mat transVect) |
static void |
opencv_calib3d.decomposeProjectionMatrix(opencv_core.Mat projMatrix,
opencv_core.Mat cameraMatrix,
opencv_core.Mat rotMatrix,
opencv_core.Mat transVect,
opencv_core.Mat rotMatrixX,
opencv_core.Mat rotMatrixY,
opencv_core.Mat rotMatrixZ,
opencv_core.Mat eulerAngles)
\brief Decomposes a projection matrix into a rotation matrix and a camera matrix.
|
static void |
opencv_cudaimgproc.demosaicing(opencv_core.Mat src,
opencv_core.Mat dst,
int code) |
static void |
opencv_imgproc.demosaicing(opencv_core.Mat _src,
opencv_core.Mat _dst,
int code) |
static void |
opencv_imgproc.demosaicing(opencv_core.Mat _src,
opencv_core.Mat _dst,
int code,
int dcn)
\} imgproc_misc
|
static void |
opencv_cudaimgproc.demosaicing(opencv_core.Mat src,
opencv_core.Mat dst,
int code,
int dcn,
opencv_core.Stream stream)
\brief Converts an image from Bayer pattern to RGB or grayscale.
|
static void |
opencv_photo.denoise_TVL1(opencv_core.MatVector observations,
opencv_core.Mat result) |
static void |
opencv_photo.denoise_TVL1(opencv_core.MatVector observations,
opencv_core.Mat result,
double lambda,
int niters)
\brief Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
finding a function to minimize some functional).
|
static void |
opencv_photo.detailEnhance(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.detailEnhance(opencv_core.Mat src,
opencv_core.Mat dst,
float sigma_s,
float sigma_r)
\brief This filter enhances the details of a particular image.
|
void |
opencv_features2d.Feature2D.detect(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints) |
void |
opencv_features2d.Feature2D.detect(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat mask)
\brief Detects keypoints in an image (first variant) or image set (second variant).
|
boolean |
opencv_objdetect.QRCodeDetector.detect(opencv_core.Mat in,
opencv_core.Mat points) |
void |
opencv_cudaimgproc.CannyEdgeDetector.detect(opencv_core.Mat image,
opencv_core.Mat edges) |
void |
opencv_cudaimgproc.HoughLinesDetector.detect(opencv_core.Mat src,
opencv_core.Mat lines) |
void |
opencv_cudaimgproc.HoughSegmentDetector.detect(opencv_core.Mat src,
opencv_core.Mat lines) |
void |
opencv_cudaimgproc.HoughCirclesDetector.detect(opencv_core.Mat src,
opencv_core.Mat circles) |
void |
opencv_cudaimgproc.CornersDetector.detect(opencv_core.Mat image,
opencv_core.Mat corners) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat image,
opencv_core.Mat positions) |
void |
opencv_imgproc.LineSegmentDetector.detect(opencv_core.Mat _image,
opencv_core.Mat _lines) |
void |
opencv_cudaimgproc.CannyEdgeDetector.detect(opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat edges) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat image,
opencv_core.Mat positions,
opencv_core.Mat votes)
Finds the template on the image.
|
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat positions) |
void |
opencv_imgproc.GeneralizedHough.detect(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat positions,
opencv_core.Mat votes) |
void |
opencv_imgproc.LineSegmentDetector.detect(opencv_core.Mat _image,
opencv_core.Mat _lines,
opencv_core.Mat width,
opencv_core.Mat prec,
opencv_core.Mat nfa)
\brief Finds lines in the input image.
|
void |
opencv_cudaimgproc.CannyEdgeDetector.detect(opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Mat edges,
opencv_core.Stream stream)
\overload
|
void |
opencv_cudaimgproc.CornersDetector.detect(opencv_core.Mat image,
opencv_core.Mat corners,
opencv_core.Mat mask,
opencv_core.Stream stream)
\brief Determines strong corners on an image.
|
void |
opencv_cudaimgproc.CannyEdgeDetector.detect(opencv_core.Mat image,
opencv_core.Mat edges,
opencv_core.Stream stream)
\brief Finds edges in an image using the \cite Canny86 algorithm.
|
void |
opencv_cudaimgproc.HoughLinesDetector.detect(opencv_core.Mat src,
opencv_core.Mat lines,
opencv_core.Stream stream)
\brief Finds lines in a binary image using the classical Hough transform.
|
void |
opencv_cudaimgproc.HoughSegmentDetector.detect(opencv_core.Mat src,
opencv_core.Mat lines,
opencv_core.Stream stream)
\brief Finds line segments in a binary image using the probabilistic Hough transform.
|
void |
opencv_cudaimgproc.HoughCirclesDetector.detect(opencv_core.Mat src,
opencv_core.Mat circles,
opencv_core.Stream stream)
\brief Finds circles in a grayscale image using the Hough transform.
|
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations) |
void |
opencv_cudaobjdetect.HOG.detect(opencv_core.Mat img,
opencv_core.PointVector found_locations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
double[] weights) |
void |
opencv_cudaobjdetect.HOG.detect(opencv_core.Mat img,
opencv_core.PointVector found_locations,
double[] confidences) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
double[] weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
DoubleBuffer weights) |
void |
opencv_cudaobjdetect.HOG.detect(opencv_core.Mat img,
opencv_core.PointVector found_locations,
DoubleBuffer confidences) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
DoubleBuffer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations) |
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations)
\brief Performs object detection without a multi-scale window.
|
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
org.bytedeco.javacpp.DoublePointer weights) |
void |
opencv_cudaobjdetect.HOG.detect(opencv_core.Mat img,
opencv_core.PointVector found_locations,
org.bytedeco.javacpp.DoublePointer confidences)
\brief Performs object detection without a multi-scale window.
|
void |
opencv_objdetect.HOGDescriptor.detect(opencv_core.Mat img,
opencv_core.PointVector foundLocations,
org.bytedeco.javacpp.DoublePointer weights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
opencv_core.PointVector searchLocations)
\brief Performs object detection without a multi-scale window.
|
void |
opencv_objdetect.DetectionBasedTracker.IDetector.detect(opencv_core.Mat image,
opencv_core.RectVector objects) |
void |
opencv_text.TextDetector.detect(opencv_core.Mat inputImage,
opencv_core.RectVector Bbox,
float[] confidence) |
void |
opencv_text.TextDetectorCNN.detect(opencv_core.Mat inputImage,
opencv_core.RectVector Bbox,
float[] confidence) |
void |
opencv_text.TextDetector.detect(opencv_core.Mat inputImage,
opencv_core.RectVector Bbox,
FloatBuffer confidence) |
void |
opencv_text.TextDetectorCNN.detect(opencv_core.Mat inputImage,
opencv_core.RectVector Bbox,
FloatBuffer confidence) |
void |
opencv_text.TextDetector.detect(opencv_core.Mat inputImage,
opencv_core.RectVector Bbox,
org.bytedeco.javacpp.FloatPointer confidence)
\brief Method that provides a quick and simple interface to detect text inside an image
|
void |
opencv_text.TextDetectorCNN.detect(opencv_core.Mat inputImage,
opencv_core.RectVector Bbox,
org.bytedeco.javacpp.FloatPointer confidence)
\overload
|
void |
opencv_xfeatures2d.AffineFeature2D.detect(opencv_core.Mat image,
opencv_xfeatures2d.Elliptic_KeyPoint keypoints) |
void |
opencv_xfeatures2d.AffineFeature2D.detect(opencv_core.Mat image,
opencv_xfeatures2d.Elliptic_KeyPoint keypoints,
opencv_core.Mat mask)
\brief Detects keypoints in the image using the wrapped detector and
performs affine adaptation to augment them with their elliptic regions.
|
void |
opencv_features2d.Feature2D.detectAndCompute(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat descriptors) |
void |
opencv_features2d.Feature2D.detectAndCompute(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints)
Detects keypoints and computes the descriptors
|
void |
opencv_xfeatures2d.AffineFeature2D.detectAndCompute(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_xfeatures2d.Elliptic_KeyPoint keypoints,
opencv_core.Mat descriptors) |
void |
opencv_xfeatures2d.AffineFeature2D.detectAndCompute(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_xfeatures2d.Elliptic_KeyPoint keypoints,
opencv_core.Mat descriptors,
boolean useProvidedKeypoints)
\brief Detects keypoints and computes descriptors for their surrounding
regions, after warping them into circles.
|
static void |
opencv_aruco.detectCharucoDiamond(opencv_core.Mat image,
opencv_core.GpuMatVector markerCorners,
opencv_core.Mat markerIds,
float squareMarkerLengthRate,
opencv_core.GpuMatVector diamondCorners,
opencv_core.Mat diamondIds) |
static void |
opencv_aruco.detectCharucoDiamond(opencv_core.Mat image,
opencv_core.GpuMatVector markerCorners,
opencv_core.Mat markerIds,
float squareMarkerLengthRate,
opencv_core.GpuMatVector diamondCorners,
opencv_core.Mat diamondIds,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static void |
opencv_aruco.detectCharucoDiamond(opencv_core.Mat image,
opencv_core.MatVector markerCorners,
opencv_core.Mat markerIds,
float squareMarkerLengthRate,
opencv_core.MatVector diamondCorners,
opencv_core.Mat diamondIds) |
static void |
opencv_aruco.detectCharucoDiamond(opencv_core.Mat image,
opencv_core.MatVector markerCorners,
opencv_core.Mat markerIds,
float squareMarkerLengthRate,
opencv_core.MatVector diamondCorners,
opencv_core.Mat diamondIds,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs)
\brief Detect ChArUco Diamond markers
|
static void |
opencv_aruco.detectCharucoDiamond(opencv_core.Mat image,
opencv_core.UMatVector markerCorners,
opencv_core.Mat markerIds,
float squareMarkerLengthRate,
opencv_core.UMatVector diamondCorners,
opencv_core.Mat diamondIds) |
static void |
opencv_aruco.detectCharucoDiamond(opencv_core.Mat image,
opencv_core.UMatVector markerCorners,
opencv_core.Mat markerIds,
float squareMarkerLengthRate,
opencv_core.UMatVector diamondCorners,
opencv_core.Mat diamondIds,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
void |
opencv_ximgproc.StructuredEdgeDetection.detectEdges(opencv_core.Mat _src,
opencv_core.Mat _dst)
\brief The function detects edges in src and draw them to dst.
|
static void |
opencv_aruco.detectMarkers(opencv_core.Mat image,
opencv_aruco.Dictionary dictionary,
opencv_core.GpuMatVector corners,
opencv_core.Mat ids) |
static void |
opencv_aruco.detectMarkers(opencv_core.Mat image,
opencv_aruco.Dictionary dictionary,
opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_aruco.DetectorParameters parameters,
opencv_core.GpuMatVector rejectedImgPoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeff) |
static void |
opencv_aruco.detectMarkers(opencv_core.Mat image,
opencv_aruco.Dictionary dictionary,
opencv_core.MatVector corners,
opencv_core.Mat ids) |
static void |
opencv_aruco.detectMarkers(opencv_core.Mat image,
opencv_aruco.Dictionary dictionary,
opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_aruco.DetectorParameters parameters,
opencv_core.MatVector rejectedImgPoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeff)
\brief Basic marker detection
|
static void |
opencv_aruco.detectMarkers(opencv_core.Mat image,
opencv_aruco.Dictionary dictionary,
opencv_core.UMatVector corners,
opencv_core.Mat ids) |
static void |
opencv_aruco.detectMarkers(opencv_core.Mat image,
opencv_aruco.Dictionary dictionary,
opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_aruco.DetectorParameters parameters,
opencv_core.UMatVector rejectedImgPoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeff) |
void |
opencv_cudaobjdetect.CudaCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Mat objects) |
void |
opencv_cudaobjdetect.CudaCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.Mat objects,
opencv_core.Stream stream)
\brief Detects objects of different sizes in the input image.
|
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations) |
void |
opencv_cudaobjdetect.HOG.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector found_locations) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
double[] foundWeights) |
void |
opencv_cudaobjdetect.HOG.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector found_locations,
double[] confidences) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
double[] foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
DoubleBuffer foundWeights) |
void |
opencv_cudaobjdetect.HOG.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector found_locations,
DoubleBuffer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
DoubleBuffer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize)
\brief Detects objects of different sizes in the input image.
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping)
\brief Detects objects of different sizes in the input image.
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
org.bytedeco.javacpp.DoublePointer foundWeights) |
void |
opencv_cudaobjdetect.HOG.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector found_locations,
org.bytedeco.javacpp.DoublePointer confidences)
\brief Performs object detection with a multi-scale window.
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScale(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
org.bytedeco.javacpp.DoublePointer foundWeights,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding,
double scale,
double finalThreshold,
boolean useMeanshiftGrouping)
\brief Detects objects of different sizes in the input image.
|
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
int[] numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
org.bytedeco.javacpp.IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.BaseCascadeClassifier.detectMultiScale(opencv_core.Mat image,
opencv_core.RectVector objects,
org.bytedeco.javacpp.IntPointer rejectLevels,
org.bytedeco.javacpp.DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
int[] numDetections) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
int[] numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer numDetections) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
org.bytedeco.javacpp.IntPointer numDetections) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale2(opencv_core.Mat image,
opencv_core.RectVector objects,
org.bytedeco.javacpp.IntPointer numDetections,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize)
\overload
|
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
int[] rejectLevels,
double[] levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
IntBuffer rejectLevels,
DoubleBuffer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
org.bytedeco.javacpp.IntPointer rejectLevels,
org.bytedeco.javacpp.DoublePointer levelWeights) |
void |
opencv_objdetect.CascadeClassifier.detectMultiScale3(opencv_core.Mat image,
opencv_core.RectVector objects,
org.bytedeco.javacpp.IntPointer rejectLevels,
org.bytedeco.javacpp.DoublePointer levelWeights,
double scaleFactor,
int minNeighbors,
int flags,
opencv_core.Size minSize,
opencv_core.Size maxSize,
boolean outputRejectLevels)
\overload
This function allows you to retrieve the final stage decision certainty of classification.
|
void |
opencv_objdetect.HOGDescriptor.detectMultiScaleROI(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
opencv_objdetect.DetectionROI locations) |
void |
opencv_objdetect.HOGDescriptor.detectMultiScaleROI(opencv_core.Mat img,
opencv_core.RectVector foundLocations,
opencv_objdetect.DetectionROI locations,
double hitThreshold,
int groupThreshold)
\brief evaluate specified ROI and return confidence value for each location in multiple scales
|
static boolean |
opencv_objdetect.detectQRCode(opencv_core.Mat in,
opencv_core.PointVector points) |
static boolean |
opencv_objdetect.detectQRCode(opencv_core.Mat in,
opencv_core.PointVector points,
double eps_x,
double eps_y)
\brief Detect QR code in image and return minimum area of quadrangle that describes QR code.
|
void |
opencv_features2d.MSER.detectRegions(opencv_core.Mat image,
opencv_core.PointVectorVector msers,
opencv_core.RectVector bboxes)
\brief Detects MSER regions
|
static void |
opencv_text.detectRegions(opencv_core.Mat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.PointVectorVector regions) |
static void |
opencv_text.detectRegions(opencv_core.Mat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.detectRegions(opencv_core.Mat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.RectVector groups_rects,
int method,
org.bytedeco.javacpp.BytePointer filename,
float minProbability)
\brief Extracts text regions from image.
|
static void |
opencv_text.detectRegions(opencv_core.Mat image,
opencv_text.ERFilter er_filter1,
opencv_text.ERFilter er_filter2,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbability) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
double[] confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
double[] confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
DoubleBuffer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
DoubleBuffer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
org.bytedeco.javacpp.DoublePointer confidences) |
void |
opencv_objdetect.HOGDescriptor.detectROI(opencv_core.Mat img,
opencv_core.PointVector locations,
opencv_core.PointVector foundLocations,
org.bytedeco.javacpp.DoublePointer confidences,
double hitThreshold,
opencv_core.Size winStride,
opencv_core.Size padding)
\brief evaluate specified ROI and return confidence value for each location
|
static double |
opencv_core.determinant(opencv_core.Mat mtx)
\brief Returns the determinant of a square floating-point matrix.
|
static void |
opencv_core.dft(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.dft(opencv_core.Mat src,
opencv_core.Mat dst,
int flags,
int nonzeroRows)
\brief Performs a forward or inverse Discrete Fourier transform of a 1D or 2D floating-point array.
|
static void |
opencv_cudaarithm.dft(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dft_size) |
static void |
opencv_cudaarithm.dft(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dft_size,
int flags,
opencv_core.Stream stream)
\brief Performs a forward or inverse discrete Fourier transform (1D or 2D) of the floating point matrix.
|
static opencv_core.Mat |
opencv_core.Mat.diag(opencv_core.Mat d)
\brief creates a diagonal matrix
|
static void |
opencv_imgproc.dilate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.dilate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
\brief Dilates an image by using a specific structuring element.
|
static void |
opencv_imgproc.distanceTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int distanceType,
int maskSize) |
static void |
opencv_imgproc.distanceTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int distanceType,
int maskSize,
int dstType)
\overload
|
static void |
opencv_imgproc.distanceTransformWithLabels(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat labels,
int distanceType,
int maskSize) |
static void |
opencv_imgproc.distanceTransformWithLabels(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat labels,
int distanceType,
int maskSize,
int labelType)
\brief Calculates the distance to the closest zero pixel for each pixel of the source image.
|
static void |
opencv_calib3d.distortPoints(opencv_core.Mat undistorted,
opencv_core.Mat distorted,
opencv_core.Mat K,
opencv_core.Mat D) |
static void |
opencv_calib3d.distortPoints(opencv_core.Mat undistorted,
opencv_core.Mat distorted,
opencv_core.Mat K,
opencv_core.Mat D,
double alpha)
\brief Distorts 2D points using fisheye model.
|
static opencv_core.MatExpr |
opencv_core.divide(double s,
opencv_core.Mat a) |
static void |
opencv_core.divide(double scale,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.divide(double scale,
opencv_core.Mat src2,
opencv_core.Mat dst,
int dtype)
\overload
|
static opencv_core.MatExpr |
opencv_core.divide(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.divide(opencv_core.MatExpr e,
opencv_core.Mat m) |
static opencv_core.MatExpr |
opencv_core.divide(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.divide(opencv_core.Mat m,
opencv_core.MatExpr e) |
static void |
opencv_core.divide(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.divide(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.divide(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
double scale,
int dtype)
\brief Performs per-element division of two arrays or a scalar by an array.
|
static void |
opencv_cudaarithm.divide(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
double scale,
int dtype,
opencv_core.Stream stream)
\brief Computes a matrix-matrix or matrix-scalar division.
|
static opencv_core.Mat |
opencv_core.dividePut(opencv_core.Mat a,
double b) |
static opencv_core.Mat |
opencv_core.dividePut(opencv_core.Mat a,
opencv_core.Mat b) |
double |
opencv_core.Mat.dot(opencv_core.Mat m)
\brief Computes a dot-product of two vectors.
|
double |
opencv_core.UMat.dot(opencv_core.Mat m)
computes dot-product
|
double |
opencv_core.MatExpr.dot(opencv_core.Mat m) |
void |
opencv_core.GpuMat.download(opencv_core.Mat dst)
\brief Performs data download from GpuMat (Blocking call)
|
void |
opencv_core.GpuMat.download(opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Performs data download from GpuMat (Non-Blocking call)
|
void |
opencv_cudaimgproc.HoughLinesDetector.downloadResults(opencv_core.Mat d_lines,
opencv_core.Mat h_lines) |
void |
opencv_cudaimgproc.HoughLinesDetector.downloadResults(opencv_core.Mat d_lines,
opencv_core.Mat h_lines,
opencv_core.Mat h_votes,
opencv_core.Stream stream)
\brief Downloads results from cuda::HoughLinesDetector::detect to host memory.
|
void |
opencv_aruco.GridBoard.draw(opencv_core.Size outSize,
opencv_core.Mat img) |
void |
opencv_aruco.CharucoBoard.draw(opencv_core.Size outSize,
opencv_core.Mat img) |
void |
opencv_aruco.GridBoard.draw(opencv_core.Size outSize,
opencv_core.Mat img,
int marginSize,
int borderBits)
\brief Draw a GridBoard
|
void |
opencv_aruco.CharucoBoard.draw(opencv_core.Size outSize,
opencv_core.Mat img,
int marginSize,
int borderBits)
\brief Draw a ChArUco board
|
static void |
opencv_aruco.drawAxis(opencv_core.Mat image,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
float length)
\brief Draw coordinate system axis from pose estimation
|
static void |
opencv_aruco.drawCharucoDiamond(opencv_aruco.Dictionary dictionary,
opencv_core.Scalar4i ids,
int squareLength,
int markerLength,
opencv_core.Mat img) |
static void |
opencv_aruco.drawCharucoDiamond(opencv_aruco.Dictionary dictionary,
opencv_core.Scalar4i ids,
int squareLength,
int markerLength,
opencv_core.Mat img,
int marginSize,
int borderBits)
\brief Draw a ChArUco Diamond marker
|
static void |
opencv_calib3d.drawChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners,
boolean patternWasFound)
\brief Renders the detected chessboard corners.
|
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.GpuMatVector contours,
int contourIdx,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.GpuMatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.Mat hierarchy,
int maxLevel,
opencv_core.Point offset) |
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.Mat hierarchy,
int maxLevel,
opencv_core.Point offset)
\brief Draws contours outlines or filled contours.
|
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.UMatVector contours,
int contourIdx,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawContours(opencv_core.Mat image,
opencv_core.UMatVector contours,
int contourIdx,
opencv_core.Scalar color,
int thickness,
int lineType,
opencv_core.Mat hierarchy,
int maxLevel,
opencv_core.Point offset) |
static void |
opencv_aruco.drawDetectedCornersCharuco(opencv_core.Mat image,
opencv_core.Mat charucoCorners) |
static void |
opencv_aruco.drawDetectedCornersCharuco(opencv_core.Mat image,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds,
opencv_core.Scalar cornerColor)
\brief Draws a set of Charuco corners
|
static void |
opencv_aruco.drawDetectedDiamonds(opencv_core.Mat image,
opencv_core.GpuMatVector diamondCorners) |
static void |
opencv_aruco.drawDetectedDiamonds(opencv_core.Mat image,
opencv_core.GpuMatVector diamondCorners,
opencv_core.Mat diamondIds,
opencv_core.Scalar borderColor) |
static void |
opencv_aruco.drawDetectedDiamonds(opencv_core.Mat image,
opencv_core.MatVector diamondCorners) |
static void |
opencv_aruco.drawDetectedDiamonds(opencv_core.Mat image,
opencv_core.MatVector diamondCorners,
opencv_core.Mat diamondIds,
opencv_core.Scalar borderColor)
\brief Draw a set of detected ChArUco Diamond markers
|
static void |
opencv_aruco.drawDetectedDiamonds(opencv_core.Mat image,
opencv_core.UMatVector diamondCorners) |
static void |
opencv_aruco.drawDetectedDiamonds(opencv_core.Mat image,
opencv_core.UMatVector diamondCorners,
opencv_core.Mat diamondIds,
opencv_core.Scalar borderColor) |
static void |
opencv_aruco.drawDetectedMarkers(opencv_core.Mat image,
opencv_core.GpuMatVector corners) |
static void |
opencv_aruco.drawDetectedMarkers(opencv_core.Mat image,
opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_core.Scalar borderColor) |
static void |
opencv_aruco.drawDetectedMarkers(opencv_core.Mat image,
opencv_core.MatVector corners) |
static void |
opencv_aruco.drawDetectedMarkers(opencv_core.Mat image,
opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_core.Scalar borderColor)
\brief Draw detected markers in image
|
static void |
opencv_aruco.drawDetectedMarkers(opencv_core.Mat image,
opencv_core.UMatVector corners) |
static void |
opencv_aruco.drawDetectedMarkers(opencv_core.Mat image,
opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_core.Scalar borderColor) |
static void |
opencv_face.drawFacemarks(opencv_core.Mat image,
opencv_core.Point2fVector points,
opencv_core.Scalar color)
\brief Utility to draw the detected facial landmark points
|
static void |
opencv_features2d.drawKeypoints(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat outImage) |
static void |
opencv_features2d.drawKeypoints(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
opencv_core.Mat outImage,
opencv_core.Scalar color,
int flags)
\brief Draws keypoints.
|
void |
opencv_aruco.Dictionary.drawMarker(int id,
int sidePixels,
opencv_core.Mat _img) |
void |
opencv_aruco.Dictionary.drawMarker(int id,
int sidePixels,
opencv_core.Mat _img,
int borderBits)
\brief Draw a canonical marker image
|
static void |
opencv_aruco.drawMarker(opencv_aruco.Dictionary dictionary,
int id,
int sidePixels,
opencv_core.Mat img) |
static void |
opencv_aruco.drawMarker(opencv_aruco.Dictionary dictionary,
int id,
int sidePixels,
opencv_core.Mat img,
int borderBits)
\brief Draw a canonical marker image
|
static void |
opencv_imgproc.drawMarker(opencv_core.Mat img,
opencv_core.Point position,
opencv_core.Scalar color) |
static void |
opencv_imgproc.drawMarker(opencv_core.Mat img,
opencv_core.Point position,
opencv_core.Scalar color,
int markerType,
int markerSize,
int thickness,
int line_type)
\brief Draws a marker on a predefined position in an image.
|
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.Mat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.Mat outImg) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.Mat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
byte[] matchesMask,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.Mat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
ByteBuffer matchesMask,
int flags) |
static void |
opencv_features2d.drawMatches(opencv_core.Mat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.Mat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVector matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
org.bytedeco.javacpp.BytePointer matchesMask,
int flags)
\brief Draws the found matches of keypoints from two images.
|
static void |
opencv_features2d.drawMatchesKnn(opencv_core.Mat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.Mat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVectorVector matches1to2,
opencv_core.Mat outImg) |
static void |
opencv_features2d.drawMatchesKnn(opencv_core.Mat img1,
opencv_core.KeyPointVector keypoints1,
opencv_core.Mat img2,
opencv_core.KeyPointVector keypoints2,
opencv_core.DMatchVectorVector matches1to2,
opencv_core.Mat outImg,
opencv_core.Scalar matchColor,
opencv_core.Scalar singlePointColor,
opencv_core.ByteVectorVector matchesMask,
int flags)
\overload
|
static void |
opencv_aruco.drawPlanarBoard(opencv_aruco.Board board,
opencv_core.Size outSize,
opencv_core.Mat img) |
static void |
opencv_aruco.drawPlanarBoard(opencv_aruco.Board board,
opencv_core.Size outSize,
opencv_core.Mat img,
int marginSize,
int borderBits)
\brief Draw a planar board
\sa _drawPlanarBoardImpl
|
void |
opencv_imgproc.LineSegmentDetector.drawSegments(opencv_core.Mat _image,
opencv_core.Mat lines)
\brief Draws the line segments on a given image.
|
static void |
opencv_xfeatures2d.PCTSignatures.drawSignature(opencv_core.Mat source,
opencv_core.Mat signature,
opencv_core.Mat result) |
static void |
opencv_xfeatures2d.PCTSignatures.drawSignature(opencv_core.Mat source,
opencv_core.Mat signature,
opencv_core.Mat result,
float radiusToShorterSideRatio,
int borderThickness)
\brief Draws signature in the source image and outputs the result.
|
static void |
opencv_ximgproc.dtFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigmaSpatial,
double sigmaColor) |
static void |
opencv_ximgproc.dtFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
\brief Simple one-line Domain Transform filter call.
|
static void |
opencv_photo.edgePreservingFilter(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.edgePreservingFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int flags,
float sigma_s,
float sigma_r)
\} photo_clone
|
void |
opencv_ximgproc.StructuredEdgeDetection.edgesNms(opencv_core.Mat edge_image,
opencv_core.Mat orientation_image,
opencv_core.Mat _dst) |
void |
opencv_ximgproc.StructuredEdgeDetection.edgesNms(opencv_core.Mat edge_image,
opencv_core.Mat orientation_image,
opencv_core.Mat _dst,
int r,
int s,
float m,
boolean isParallel)
\brief Performs non-maximum suppression on the edge image, suppressing edges where the edge response is stronger in the orthogonal direction.
|
static boolean |
opencv_core.eigen(opencv_core.Mat src,
opencv_core.Mat eigenvalues) |
static boolean |
opencv_core.eigen(opencv_core.Mat src,
opencv_core.Mat eigenvalues,
opencv_core.Mat eigenvectors)
\brief Calculates eigenvalues and eigenvectors of a symmetric matrix.
|
static void |
opencv_core.eigenNonSymmetric(opencv_core.Mat src,
opencv_core.Mat eigenvalues,
opencv_core.Mat eigenvectors)
\brief Calculates eigenvalues and eigenvectors of a non-symmetric matrix (real eigenvalues only).
|
opencv_core.PCA |
opencv_core.PCA.eigenvalues(opencv_core.Mat eigenvalues) |
opencv_core.PCA |
opencv_core.PCA.eigenvectors(opencv_core.Mat eigenvectors) |
static void |
opencv_imgproc.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color) |
static void |
opencv_imgproc.ellipse(opencv_core.Mat img,
opencv_core.Point center,
opencv_core.Size axes,
double angle,
double startAngle,
double endAngle,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws a simple or thick elliptic arc or fills an ellipse sector.
|
static void |
opencv_imgproc.ellipse(opencv_core.Mat img,
opencv_core.RotatedRect box,
opencv_core.Scalar color) |
static void |
opencv_imgproc.ellipse(opencv_core.Mat img,
opencv_core.RotatedRect box,
opencv_core.Scalar color,
int thickness,
int lineType)
\overload
|
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType) |
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType,
opencv_core.Mat cost,
float[] lowerBound,
opencv_core.Mat flow) |
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType,
opencv_core.Mat cost,
FloatBuffer lowerBound,
opencv_core.Mat flow) |
static float |
opencv_imgproc.EMD(opencv_core.Mat signature1,
opencv_core.Mat signature2,
int distType,
opencv_core.Mat cost,
org.bytedeco.javacpp.FloatPointer lowerBound,
opencv_core.Mat flow)
\brief Computes the "minimal work" distance between two weighted point configurations.
|
static float |
opencv_shape.EMDL1(opencv_core.Mat signature1,
opencv_core.Mat signature2)
\addtogroup shape
\{
|
static opencv_core.Mat |
opencv_videostab.ensureInclusionConstraint(opencv_core.Mat M,
opencv_core.Size size,
float trimRatio) |
static void |
opencv_core.ensureSizeIsEnough(int rows,
int cols,
int type,
opencv_core.Mat arr)
\brief Ensures that the size of a matrix is big enough and the matrix has a proper type.
|
static void |
opencv_cudaimgproc.equalizeHist(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.equalizeHist(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Equalizes the histogram of a grayscale image.
|
static void |
opencv_cudaimgproc.equalizeHist(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Equalizes the histogram of a grayscale image.
|
static opencv_core.MatExpr |
opencv_core.equals(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.equals(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.equals(opencv_core.Mat a,
opencv_core.Mat b) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.GpuMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.GpuMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
org.bytedeco.javacpp.BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.GpuMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.Mat image,
opencv_core.Mat channel,
opencv_core.PointVectorVector regions,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.Mat image,
opencv_core.Mat channel,
opencv_core.PointVectorVector regions,
opencv_core.RectVector groups_rects,
int method,
org.bytedeco.javacpp.BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.Mat image,
opencv_core.Mat channel,
opencv_core.PointVectorVector regions,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.MatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.MatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
org.bytedeco.javacpp.BytePointer filename,
float minProbablity)
\brief Find groups of Extremal Regions that are organized as text blocks.
|
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.MatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.UMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.UMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
org.bytedeco.javacpp.BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(opencv_core.Mat img,
opencv_core.UMatVector channels,
opencv_text.ERStatVectorVector regions,
opencv_core.PointVectorVector groups,
opencv_core.RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_imgproc.erode(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.erode(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
\brief Erodes an image by using a specific structuring element.
|
opencv_tracking.UnscentedKalmanFilterParams |
opencv_tracking.UnscentedKalmanFilterParams.errorCovInit(opencv_core.Mat errorCovInit) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.errorCovPost(opencv_core.Mat errorCovPost) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.errorCovPre(opencv_core.Mat errorCovPre) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.Mat points0,
opencv_core.Mat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.Mat points0,
opencv_core.Mat points1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.Mat points0,
opencv_core.Mat points1) |
opencv_core.Mat |
opencv_videostab.ImageMotionEstimatorBase.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.FromFileMotionReader.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.ToFileMotionWriter.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.ImageMotionEstimatorBase.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.FromFileMotionReader.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.ToFileMotionWriter.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
boolean[] ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorBase.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
org.bytedeco.javacpp.BoolPointer ok)
\brief Estimates global motion between two 2D point clouds.
|
opencv_core.Mat |
opencv_videostab.MotionEstimatorRansacL2.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.MotionEstimatorL1.estimate(opencv_core.Mat points0,
opencv_core.Mat points1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.ImageMotionEstimatorBase.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.FromFileMotionReader.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.ToFileMotionWriter.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
opencv_core.Mat |
opencv_videostab.KeypointBasedMotionEstimator.estimate(opencv_core.Mat frame0,
opencv_core.Mat frame1,
org.bytedeco.javacpp.BoolPointer ok) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.Mat from,
opencv_core.Mat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffine2D(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters)
\brief Computes an optimal affine transformation between two 2D point sets.
|
static int |
opencv_calib3d.estimateAffine3D(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat out,
opencv_core.Mat inliers) |
static int |
opencv_calib3d.estimateAffine3D(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat out,
opencv_core.Mat inliers,
double ransacThreshold,
double confidence)
\brief Computes an optimal affine transformation between two 3D point sets.
|
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.Mat from,
opencv_core.Mat to) |
static opencv_core.Mat |
opencv_calib3d.estimateAffinePartial2D(opencv_core.Mat from,
opencv_core.Mat to,
opencv_core.Mat inliers,
int method,
double ransacReprojThreshold,
long maxIters,
double confidence,
long refineIters)
\brief Computes an optimal limited affine transformation with 4 degrees of freedom between
two 2D point sets.
|
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
float[] rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
FloatBuffer rmse) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionLeastSquares(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
org.bytedeco.javacpp.FloatPointer rmse)
\addtogroup videostab_motion
\{
|
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
opencv_videostab.RansacParams params,
float[] rmse,
int[] ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
opencv_videostab.RansacParams params,
FloatBuffer rmse,
IntBuffer ninliers) |
static opencv_core.Mat |
opencv_videostab.estimateGlobalMotionRansac(opencv_core.Mat points0,
opencv_core.Mat points1,
int model,
opencv_videostab.RansacParams params,
org.bytedeco.javacpp.FloatPointer rmse,
org.bytedeco.javacpp.IntPointer ninliers)
\brief Estimates best global motion between two 2D point clouds robustly (using RANSAC method).
|
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Size image_size,
opencv_core.Mat R,
opencv_core.Mat P) |
static void |
opencv_calib3d.estimateNewCameraMatrixForUndistortRectify(opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Size image_size,
opencv_core.Mat R,
opencv_core.Mat P,
double balance,
opencv_core.Size new_size,
double fov_scale)
\brief Estimates new camera matrix for undistortion or rectification.
|
static float |
opencv_videostab.estimateOptimalTrimRatio(opencv_core.Mat M,
opencv_core.Size size) |
static int |
opencv_aruco.estimatePoseBoard(opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_aruco.Board board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static int |
opencv_aruco.estimatePoseBoard(opencv_core.GpuMatVector corners,
opencv_core.Mat ids,
opencv_aruco.Board board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess) |
static int |
opencv_aruco.estimatePoseBoard(opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_aruco.Board board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static int |
opencv_aruco.estimatePoseBoard(opencv_core.MatVector corners,
opencv_core.Mat ids,
opencv_aruco.Board board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess)
\brief Pose estimation for a board of markers
|
static int |
opencv_aruco.estimatePoseBoard(opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_aruco.Board board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static int |
opencv_aruco.estimatePoseBoard(opencv_core.UMatVector corners,
opencv_core.Mat ids,
opencv_aruco.Board board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess) |
static boolean |
opencv_aruco.estimatePoseCharucoBoard(opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static boolean |
opencv_aruco.estimatePoseCharucoBoard(opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds,
opencv_aruco.CharucoBoard board,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess)
\brief Pose estimation for a ChArUco board given some of their corners
|
static void |
opencv_aruco.estimatePoseSingleMarkers(opencv_core.GpuMatVector corners,
float markerLength,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvecs,
opencv_core.Mat tvecs) |
static void |
opencv_aruco.estimatePoseSingleMarkers(opencv_core.GpuMatVector corners,
float markerLength,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvecs,
opencv_core.Mat tvecs,
opencv_core.Mat _objPoints) |
static void |
opencv_aruco.estimatePoseSingleMarkers(opencv_core.MatVector corners,
float markerLength,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvecs,
opencv_core.Mat tvecs) |
static void |
opencv_aruco.estimatePoseSingleMarkers(opencv_core.MatVector corners,
float markerLength,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvecs,
opencv_core.Mat tvecs,
opencv_core.Mat _objPoints)
\brief Pose estimation for single markers
|
static void |
opencv_aruco.estimatePoseSingleMarkers(opencv_core.UMatVector corners,
float markerLength,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvecs,
opencv_core.Mat tvecs) |
static void |
opencv_aruco.estimatePoseSingleMarkers(opencv_core.UMatVector corners,
float markerLength,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvecs,
opencv_core.Mat tvecs,
opencv_core.Mat _objPoints) |
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine)
\brief Computes an optimal affine transformation between two 2D point sets.
|
static opencv_core.Mat |
opencv_video.estimateRigidTransform(opencv_core.Mat src,
opencv_core.Mat dst,
boolean fullAffine,
int ransacMaxIters,
double ransacGoodRatio,
int ransacSize0) |
void |
opencv_shape.ShapeTransformer.estimateTransformation(opencv_core.Mat transformingShape,
opencv_core.Mat targetShape,
opencv_core.DMatchVector matches)
\brief Estimate the transformation parameters of the current transformer algorithm, based on point matches.
|
float |
opencv_tracking.StrongClassifierDirectSelection.eval(opencv_core.Mat response) |
int |
opencv_tracking.BaseClassifier.eval(opencv_core.Mat image) |
void |
opencv_text.OCRHMMDecoder.ClassifierCallback.eval(opencv_core.Mat image,
int[] out_class,
opencv_text.DoubleVector out_confidence) |
void |
opencv_text.OCRHMMDecoder.ClassifierCallback.eval(opencv_core.Mat image,
IntBuffer out_class,
opencv_text.DoubleVector out_confidence) |
void |
opencv_text.OCRHMMDecoder.ClassifierCallback.eval(opencv_core.Mat image,
org.bytedeco.javacpp.IntPointer out_class,
opencv_text.DoubleVector out_confidence)
\brief The character classifier must return a (ranked list of) class id(s)
|
boolean |
opencv_tracking.CvHaarEvaluator.FeatureHaar.eval(opencv_core.Mat image,
opencv_core.Rect ROI,
float[] result) |
boolean |
opencv_tracking.CvHaarEvaluator.FeatureHaar.eval(opencv_core.Mat image,
opencv_core.Rect ROI,
FloatBuffer result) |
boolean |
opencv_tracking.CvHaarEvaluator.FeatureHaar.eval(opencv_core.Mat image,
opencv_core.Rect ROI,
org.bytedeco.javacpp.FloatPointer result) |
void |
opencv_text.OCRBeamSearchDecoder.ClassifierCallback.eval(opencv_core.Mat image,
opencv_text.DoubleVector recognition_probabilities,
int[] oversegmentation) |
void |
opencv_text.OCRBeamSearchDecoder.ClassifierCallback.eval(opencv_core.Mat image,
opencv_text.DoubleVector recognition_probabilities,
IntBuffer oversegmentation) |
void |
opencv_text.OCRBeamSearchDecoder.ClassifierCallback.eval(opencv_core.Mat image,
opencv_text.DoubleVector recognition_probabilities,
org.bytedeco.javacpp.IntPointer oversegmentation)
\brief The character classifier must return a (ranked list of) class id(s)
|
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
float[] repeatability,
int[] correspCount) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
float[] repeatability,
int[] correspCount,
opencv_features2d.Feature2D fdetector) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
FloatBuffer repeatability,
IntBuffer correspCount) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
FloatBuffer repeatability,
IntBuffer correspCount,
opencv_features2d.Feature2D fdetector) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
org.bytedeco.javacpp.FloatPointer repeatability,
org.bytedeco.javacpp.IntPointer correspCount) |
static void |
opencv_features2d.evaluateFeatureDetector(opencv_core.Mat img1,
opencv_core.Mat img2,
opencv_core.Mat H1to2,
opencv_core.KeyPointVector keypoints1,
opencv_core.KeyPointVector keypoints2,
org.bytedeco.javacpp.FloatPointer repeatability,
org.bytedeco.javacpp.IntPointer correspCount,
opencv_features2d.Feature2D fdetector)
\} features2d_draw
|
static void |
opencv_cudaimgproc.evenLevels(opencv_core.Mat levels,
int nLevels,
int lowerLevel,
int upperLevel) |
static void |
opencv_cudaimgproc.evenLevels(opencv_core.Mat levels,
int nLevels,
int lowerLevel,
int upperLevel,
opencv_core.Stream stream)
\brief Computes levels with even distribution.
|
static void |
opencv_core.exp(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Calculates the exponent of every array element.
|
static void |
opencv_cudaarithm.exp(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.exp(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes an exponent of each matrix element.
|
static void |
opencv_core.extractChannel(opencv_core.Mat src,
opencv_core.Mat dst,
int coi)
\brief Extracts a single channel from src (coi is 0-based index)
|
static void |
opencv_core.extractImageCOI(opencv_core.CvArr arr,
opencv_core.Mat coiimg) |
static void |
opencv_core.extractImageCOI(opencv_core.CvArr arr,
opencv_core.Mat coiimg,
int coi)
extracts Channel of Interest from CvMat or IplImage and makes cv::Mat out of it.
|
boolean |
opencv_tracking.TrackerFeatureHAAR.extractSelected(int[] selFeatures,
opencv_core.MatVector images,
opencv_core.Mat response) |
boolean |
opencv_tracking.TrackerFeatureHAAR.extractSelected(IntBuffer selFeatures,
opencv_core.MatVector images,
opencv_core.Mat response) |
boolean |
opencv_tracking.TrackerFeatureHAAR.extractSelected(org.bytedeco.javacpp.IntPointer selFeatures,
opencv_core.MatVector images,
opencv_core.Mat response)
\brief Compute the features only for the selected indices in the images collection
|
void |
opencv_xphoto.LearningBasedWB.extractSimpleFeatures(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Implements the feature extraction part of the algorithm.
|
static void |
opencv_features2d.FAST(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold) |
static void |
opencv_features2d.FAST(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression)
\overload
|
static void |
opencv_features2d.FAST(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression,
int type)
\brief Detects corners using the FAST algorithm
|
static void |
opencv_xfeatures2d.FASTForPointSet(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold) |
static void |
opencv_xfeatures2d.FASTForPointSet(opencv_core.Mat image,
opencv_core.KeyPointVector keypoints,
int threshold,
boolean nonmaxSuppression,
int type)
\brief Estimates cornerness for prespecified KeyPoints using the FAST algorithm
|
static void |
opencv_ximgproc.fastGlobalSmootherFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double lambda,
double sigma_color) |
static void |
opencv_ximgproc.fastGlobalSmootherFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
\brief Simple one-line Fast Global Smoother filter call.
|
static void |
opencv_ximgproc.FastHoughTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int dstMatDepth) |
static void |
opencv_ximgproc.FastHoughTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew)
\brief Calculates 2D Fast Hough transform of an image.
|
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float[] h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float[] h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
FloatBuffer h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
FloatBuffer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
int templateWindowSize,
int searchWindowSize)
\addtogroup photo_denoise
\{
|
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
int search_window,
int block_size,
opencv_core.Stream stream)
\brief Perform image denoising using Non-local Means Denoising algorithm
|
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
org.bytedeco.javacpp.FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoising(opencv_core.Mat src,
opencv_core.Mat dst,
org.bytedeco.javacpp.FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType)
\brief Perform image denoising using Non-local Means Denoising algorithm
|
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.Mat src,
opencv_core.Mat dst,
float h_luminance,
float photo_render) |
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize)
\brief Modification of fastNlMeansDenoising function for colored images
|
static void |
opencv_photo.fastNlMeansDenoisingColored(opencv_core.Mat src,
opencv_core.Mat dst,
float h_luminance,
float photo_render,
int search_window,
int block_size,
opencv_core.Stream stream)
\brief Modification of fastNlMeansDenoising function for colored images
|
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.GpuMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.GpuMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize)
\brief Modification of fastNlMeansDenoisingMulti function for colored image sequences
|
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.UMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(opencv_core.UMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.GpuMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.GpuMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float[] h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.GpuMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float[] h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.GpuMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize)
\brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time.
|
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
org.bytedeco.javacpp.FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.MatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
org.bytedeco.javacpp.FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType)
\brief Modification of fastNlMeansDenoising function for image sequences where consecutive images have been
captured in a small period of time.
|
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatBuffer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatBuffer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(opencv_core.UMatVector srcImgs,
opencv_core.Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
void |
opencv_stitching.Blender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl)
\brief Processes the image.
|
void |
opencv_stitching.FeatherBlender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_stitching.MultiBandBlender.feed(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
static void |
opencv_imgproc.fillConvexPoly(opencv_core.Mat img,
opencv_core.Mat points,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillConvexPoly(opencv_core.Mat img,
opencv_core.Mat points,
opencv_core.Scalar color,
int lineType,
int shift)
\brief Fills a convex polygon.
|
static void |
opencv_imgproc.fillConvexPoly(opencv_core.Mat img,
opencv_core.Point pts,
int npts,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillConvexPoly(opencv_core.Mat img,
opencv_core.Point pts,
int npts,
opencv_core.Scalar color,
int lineType,
int shift)
\overload
|
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.GpuMatVector pts,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.GpuMatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.MatVector pts,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.MatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset)
\brief Fills the area bounded by one or more polygons.
|
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
org.bytedeco.javacpp.IntPointer npts,
int ncontours,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.Point pts,
org.bytedeco.javacpp.IntPointer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.UMatVector pts,
opencv_core.Scalar color) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
opencv_core.UMatVector pts,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset) |
static void |
opencv_imgproc.fillPoly(opencv_core.Mat img,
org.bytedeco.javacpp.PointerPointer pts,
org.bytedeco.javacpp.IntPointer npts,
int ncontours,
opencv_core.Scalar color,
int lineType,
int shift,
opencv_core.Point offset)
\overload
|
void |
opencv_ximgproc.DTFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_ximgproc.GuidedFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_ximgproc.AdaptiveManifoldFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_ximgproc.FastGlobalSmootherFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Apply smoothing operation to the source image.
|
void |
opencv_ximgproc.DTFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst,
int dDepth)
\brief Produce domain transform filtering operation on source image.
|
void |
opencv_ximgproc.GuidedFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst,
int dDepth)
\brief Apply Guided Filter to the filtering image.
|
void |
opencv_ximgproc.AdaptiveManifoldFilter.filter(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat joint)
\brief Apply high-dimensional filtering using adaptive manifolds.
|
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.Mat disparity_map_left,
opencv_core.Mat left_view,
opencv_core.Mat filtered_disparity_map) |
void |
opencv_ximgproc.DisparityFilter.filter(opencv_core.Mat disparity_map_left,
opencv_core.Mat left_view,
opencv_core.Mat filtered_disparity_map,
opencv_core.Mat disparity_map_right,
opencv_core.Rect ROI,
opencv_core.Mat right_view)
\brief Apply filtering to the disparity map.
|
static void |
opencv_imgproc.filter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.filter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernel,
opencv_core.Point anchor,
double delta,
int borderType)
\brief Convolves an image with the kernel.
|
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(opencv_core.GpuMatVector rotations,
opencv_core.GpuMatVector normals,
opencv_core.Mat beforePoints,
opencv_core.Mat afterPoints,
opencv_core.Mat possibleSolutions) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(opencv_core.GpuMatVector rotations,
opencv_core.GpuMatVector normals,
opencv_core.Mat beforePoints,
opencv_core.Mat afterPoints,
opencv_core.Mat possibleSolutions,
opencv_core.Mat pointsMask) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(opencv_core.MatVector rotations,
opencv_core.MatVector normals,
opencv_core.Mat beforePoints,
opencv_core.Mat afterPoints,
opencv_core.Mat possibleSolutions) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(opencv_core.MatVector rotations,
opencv_core.MatVector normals,
opencv_core.Mat beforePoints,
opencv_core.Mat afterPoints,
opencv_core.Mat possibleSolutions,
opencv_core.Mat pointsMask)
\brief Filters homography decompositions based on additional information.
|
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(opencv_core.UMatVector rotations,
opencv_core.UMatVector normals,
opencv_core.Mat beforePoints,
opencv_core.Mat afterPoints,
opencv_core.Mat possibleSolutions) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(opencv_core.UMatVector rotations,
opencv_core.UMatVector normals,
opencv_core.Mat beforePoints,
opencv_core.Mat afterPoints,
opencv_core.Mat possibleSolutions,
opencv_core.Mat pointsMask) |
static void |
opencv_calib3d.filterSpeckles(opencv_core.Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff) |
static void |
opencv_calib3d.filterSpeckles(opencv_core.Mat img,
double newVal,
int maxSpeckleSize,
double maxDiff,
opencv_core.Mat buf)
\brief Filters off small noise blobs (speckles) in the disparity map
|
static boolean |
opencv_calib3d.find4QuadCornerSubpix(opencv_core.Mat img,
opencv_core.Mat corners,
opencv_core.Size region_size)
finds subpixel-accurate positions of the chessboard corners
|
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners) |
static boolean |
opencv_calib3d.findChessboardCorners(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat corners,
int flags)
\brief Finds the positions of internal corners of the chessboard.
|
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers) |
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags,
opencv_features2d.Feature2D blobDetector)
\overload
|
static boolean |
opencv_calib3d.findCirclesGrid(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags,
opencv_features2d.Feature2D blobDetector,
opencv_calib3d.CirclesGridFinderParameters parameters)
\brief Finds centers in the grid of circles.
|
static boolean |
opencv_calib3d.findCirclesGrid2(opencv_core.Mat image,
opencv_core.Size patternSize,
opencv_core.Mat centers,
int flags,
opencv_features2d.Feature2D blobDetector,
opencv_calib3d.CirclesGridFinderParameters2 parameters)
\overload
|
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.GpuMatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.GpuMatVector contours,
int mode,
int method,
opencv_core.Point offset) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.GpuMatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.GpuMatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method,
opencv_core.Point offset) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
int mode,
int method,
opencv_core.Point offset)
\overload
|
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.MatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method,
opencv_core.Point offset)
\brief Finds contours in a binary image.
|
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.UMatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.UMatVector contours,
int mode,
int method,
opencv_core.Point offset) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.UMatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(opencv_core.Mat image,
opencv_core.UMatVector contours,
opencv_core.Mat hierarchy,
int mode,
int method,
opencv_core.Point offset) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2,
double focal,
opencv_core.Point2d pp,
int method,
double prob,
double threshold,
opencv_core.Mat mask)
\overload
|
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix) |
static opencv_core.Mat |
opencv_calib3d.findEssentialMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix,
int method,
double prob,
double threshold,
opencv_core.Mat mask)
\brief Calculates an essential matrix from the corresponding points in two images.
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
int method,
double ransacReprojThreshold,
double confidence,
opencv_core.Mat mask)
\brief Calculates a fundamental matrix from the corresponding points in two images.
|
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findFundamentalMat(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat mask,
int method,
double ransacReprojThreshold,
double confidence)
\overload
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
int method,
double ransacReprojThreshold,
opencv_core.Mat mask,
int maxIters,
double confidence)
\brief Finds a perspective transformation between two planes.
|
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask) |
static opencv_core.Mat |
opencv_calib3d.findHomography(opencv_core.Mat srcPoints,
opencv_core.Mat dstPoints,
opencv_core.Mat mask,
int method,
double ransacReprojThreshold)
\overload
|
static void |
opencv_cudaarithm.findMinMax(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.findMinMax(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Stream stream)
\overload
|
static void |
opencv_cudaarithm.findMinMaxLoc(opencv_core.Mat src,
opencv_core.Mat minMaxVals,
opencv_core.Mat loc) |
static void |
opencv_cudaarithm.findMinMaxLoc(opencv_core.Mat src,
opencv_core.Mat minMaxVals,
opencv_core.Mat loc,
opencv_core.Mat mask,
opencv_core.Stream stream)
\overload
|
float |
opencv_ml.KNearest.findNearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results) |
float |
opencv_ml.KNearest.findNearest(opencv_core.Mat samples,
int k,
opencv_core.Mat results,
opencv_core.Mat neighborResponses,
opencv_core.Mat dist)
\brief Finds the neighbors and predicts responses for input vectors.
|
static void |
opencv_core.findNonZero(opencv_core.Mat src,
opencv_core.Mat idx)
\brief Returns the list of locations of non-zero pixels
|
void |
opencv_structured_light.SinusoidalPattern.findProCamMatches(opencv_core.Mat projUnwrappedPhaseMap,
opencv_core.Mat camUnwrappedPhaseMap,
opencv_core.GpuMatVector matches) |
void |
opencv_structured_light.SinusoidalPattern.findProCamMatches(opencv_core.Mat projUnwrappedPhaseMap,
opencv_core.Mat camUnwrappedPhaseMap,
opencv_core.MatVector matches)
\brief Find correspondences between the two devices thanks to unwrapped phase maps.
|
void |
opencv_structured_light.SinusoidalPattern.findProCamMatches(opencv_core.Mat projUnwrappedPhaseMap,
opencv_core.Mat camUnwrappedPhaseMap,
opencv_core.UMatVector matches) |
static double |
opencv_video.findTransformECC(opencv_core.Mat templateImage,
opencv_core.Mat inputImage,
opencv_core.Mat warpMatrix) |
static double |
opencv_video.findTransformECC(opencv_core.Mat templateImage,
opencv_core.Mat inputImage,
opencv_core.Mat warpMatrix,
int motionType,
opencv_core.TermCriteria criteria,
opencv_core.Mat inputMask)
\brief Finds the geometric transform (warp) between two images in terms of the ECC criterion \cite EP08 .
|
opencv_core.MatBytePairVector |
opencv_core.MatBytePairVector.first(long i,
opencv_core.Mat first) |
boolean |
opencv_face.Facemark.fit(opencv_core.Mat image,
opencv_core.RectVector faces,
opencv_core.Point2fVectorVector landmarks)
\brief Detect facial landmarks from an image.
|
boolean |
opencv_face.FacemarkAAM.fitConfig(opencv_core.Mat image,
opencv_core.RectVector roi,
opencv_core.Point2fVectorVector _landmarks,
opencv_face.FacemarkAAM.Config runtime_params)
overload with additional Config structures
|
static opencv_core.RotatedRect |
opencv_imgproc.fitEllipse(opencv_core.Mat points)
\brief Fits an ellipse around a set of 2D points.
|
static opencv_core.RotatedRect |
opencv_imgproc.fitEllipseAMS(opencv_core.Mat points)
\brief Fits an ellipse around a set of 2D points.
|
static opencv_core.RotatedRect |
opencv_imgproc.fitEllipseDirect(opencv_core.Mat points)
\brief Fits an ellipse around a set of 2D points.
|
static void |
opencv_imgproc.fitLine(opencv_core.Mat points,
opencv_core.Mat line,
int distType,
double param,
double reps,
double aeps)
\brief Fits a line to a 2D or 3D point set.
|
static void |
opencv_core.flip(opencv_core.Mat src,
opencv_core.Mat dst,
int flipCode)
\brief Flips a 2D array around vertical, horizontal, or both axes.
|
static void |
opencv_cudaarithm.flip(opencv_core.Mat src,
opencv_core.Mat dst,
int flipCode) |
static void |
opencv_cudaarithm.flip(opencv_core.Mat src,
opencv_core.Mat dst,
int flipCode,
opencv_core.Stream stream)
\brief Flips a 2D matrix around vertical, horizontal, or both axes.
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Mat mask,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
\brief Fills a connected component with the given color.
|
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal) |
static int |
opencv_imgproc.floodFill(opencv_core.Mat image,
opencv_core.Point seedPoint,
opencv_core.Scalar newVal,
opencv_core.Rect rect,
opencv_core.Scalar loDiff,
opencv_core.Scalar upDiff,
int flags)
\overload
|
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
double[] f0,
double[] f1,
boolean[] f0_ok,
boolean[] f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
double[] f0,
double[] f1,
org.bytedeco.javacpp.BoolPointer f0_ok,
org.bytedeco.javacpp.BoolPointer f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
DoubleBuffer f0,
DoubleBuffer f1,
boolean[] f0_ok,
boolean[] f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
DoubleBuffer f0,
DoubleBuffer f1,
org.bytedeco.javacpp.BoolPointer f0_ok,
org.bytedeco.javacpp.BoolPointer f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
org.bytedeco.javacpp.DoublePointer f0,
org.bytedeco.javacpp.DoublePointer f1,
boolean[] f0_ok,
boolean[] f1_ok) |
static void |
opencv_stitching.focalsFromHomography(opencv_core.Mat H,
org.bytedeco.javacpp.DoublePointer f0,
org.bytedeco.javacpp.DoublePointer f1,
org.bytedeco.javacpp.BoolPointer f0_ok,
org.bytedeco.javacpp.BoolPointer f1_ok)
\addtogroup stitching_autocalib
\{
|
opencv_core.Formatted |
opencv_core.Formatter.format(opencv_core.Mat mtx) |
static opencv_core.Formatted |
opencv_core.format(opencv_core.Mat mtx,
int fmt) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.gain(opencv_core.Mat gain) |
static void |
opencv_cudaimgproc.gammaCorrection(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaimgproc.gammaCorrection(opencv_core.Mat src,
opencv_core.Mat dst,
boolean forward,
opencv_core.Stream stream)
\brief Routines for correcting image color gamma.
|
static void |
opencv_imgproc.GaussianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaX) |
static void |
opencv_imgproc.GaussianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size ksize,
double sigmaX,
double sigmaY,
int borderType)
\brief Blurs an image using a Gaussian filter.
|
static void |
opencv_core.gemm(opencv_core.Mat src1,
opencv_core.Mat src2,
double alpha,
opencv_core.Mat src3,
double beta,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.gemm(opencv_core.Mat src1,
opencv_core.Mat src2,
double alpha,
opencv_core.Mat src3,
double beta,
opencv_core.Mat dst) |
static void |
opencv_core.gemm(opencv_core.Mat src1,
opencv_core.Mat src2,
double alpha,
opencv_core.Mat src3,
double beta,
opencv_core.Mat dst,
int flags)
\brief Performs generalized matrix multiplication.
|
static void |
opencv_cudaarithm.gemm(opencv_core.Mat src1,
opencv_core.Mat src2,
double alpha,
opencv_core.Mat src3,
double beta,
opencv_core.Mat dst,
int flags,
opencv_core.Stream stream)
\} cudaarithm_reduce
|
opencv_core.Mat |
opencv_objdetect.BaseCascadeClassifier.MaskGenerator.generateMask(opencv_core.Mat src) |
static opencv_core.Mat |
opencv_imgproc.getAffineTransform(opencv_core.Mat src,
opencv_core.Mat dst) |
void |
opencv_bgsegm.BackgroundSubtractorCNT.getBackgroundImage(opencv_core.Mat backgroundImage) |
void |
opencv_bgsegm.BackgroundSubtractorGSOC.getBackgroundImage(opencv_core.Mat backgroundImage) |
void |
opencv_bgsegm.BackgroundSubtractorLSBP.getBackgroundImage(opencv_core.Mat backgroundImage) |
void |
opencv_video.BackgroundSubtractor.getBackgroundImage(opencv_core.Mat backgroundImage)
\brief Computes a background image.
|
static opencv_core.Mat |
opencv_aruco.Dictionary.getBitsFromByteList(opencv_core.Mat byteList,
int markerSize)
\brief Transform list of bytes to matrix of bits
|
static void |
opencv_aruco.getBoardObjectAndImagePoints(opencv_aruco.Board board,
opencv_core.GpuMatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.Mat objPoints,
opencv_core.Mat imgPoints) |
static void |
opencv_aruco.getBoardObjectAndImagePoints(opencv_aruco.Board board,
opencv_core.MatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.Mat objPoints,
opencv_core.Mat imgPoints)
\brief Given a board configuration and a set of detected markers, returns the corresponding
image points and object points to call solvePnP
|
static void |
opencv_aruco.getBoardObjectAndImagePoints(opencv_aruco.Board board,
opencv_core.UMatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.Mat objPoints,
opencv_core.Mat imgPoints) |
static opencv_core.Mat |
opencv_aruco.Dictionary.getByteListFromBits(opencv_core.Mat bits)
\brief Transform matrix of bits to list of bytes in the 4 rotations
|
double |
opencv_ml.SVM.getDecisionFunction(int i,
opencv_core.Mat alpha,
opencv_core.Mat svidx)
\brief Retrieves the decision function
|
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix) |
static opencv_core.Mat |
opencv_imgproc.getDefaultNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Size imgsize,
boolean centerPrincipalPoint)
\brief Returns the default new camera matrix.
|
static void |
opencv_imgproc.getDerivKernels(opencv_core.Mat kx,
opencv_core.Mat ky,
int dx,
int dy,
int ksize) |
static void |
opencv_imgproc.getDerivKernels(opencv_core.Mat kx,
opencv_core.Mat ky,
int dx,
int dy,
int ksize,
boolean normalize,
int ktype)
\brief Returns filter coefficients for computing spatial image derivatives.
|
static void |
opencv_ximgproc.getDisparityVis(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_ximgproc.getDisparityVis(opencv_core.Mat src,
opencv_core.Mat dst,
double scale)
\brief Function for creating a disparity map visualization (clamped CV_8U image)
|
int |
opencv_aruco.Dictionary.getDistanceToId(opencv_core.Mat bits,
int id) |
int |
opencv_aruco.Dictionary.getDistanceToId(opencv_core.Mat bits,
int id,
boolean allRotations)
\brief Returns the distance of the input bits to the specific id.
|
boolean |
opencv_face.FacemarkTrain.getFaces(opencv_core.Mat image,
opencv_core.RectVector faces)
\brief Detect faces from a given image using default or user defined face detector.
|
boolean |
opencv_face.FacemarkKazemi.getFaces(opencv_core.Mat image,
opencv_core.RectVector faces)
get faces using the custom detector
|
static boolean |
opencv_face.getFaces(opencv_core.Mat image,
opencv_core.RectVector faces,
opencv_face.CParams params)
\brief Default face detector
This function is mainly utilized by the implementation of a Facemark Algorithm.
|
static boolean |
opencv_face.getFacesHAAR(opencv_core.Mat image,
opencv_core.RectVector faces,
String face_cascade_name) |
void |
opencv_ximgproc.RFFeatureGetter.getFeatures(opencv_core.Mat src,
opencv_core.Mat features,
int gnrmRad,
int gsmthRad,
int shrink,
int outNum,
int gradNum)
This function extracts feature channels from src.
|
void |
opencv_shape.ShapeContextDistanceExtractor.getImages(opencv_core.Mat image1,
opencv_core.Mat image2) |
void |
opencv_structured_light.GrayCodePattern.getImagesForShadowMasks(opencv_core.Mat blackImage,
opencv_core.Mat whiteImage)
\brief Generates the all-black and all-white images needed for shadowMasks computation.
|
void |
opencv_core.DownhillSolver.getInitStep(opencv_core.Mat step)
\brief Returns the initial step that will be used in downhill simplex algorithm.
|
void |
opencv_phase_unwrapping.HistogramPhaseUnwrapping.getInverseReliabilityMap(opencv_core.Mat reliabilityMap)
\brief Get the reliability map computed from the wrapped phase map.
|
void |
opencv_ximgproc.SuperpixelSEEDS.getLabelContourMask(opencv_core.Mat image) |
void |
opencv_ximgproc.SuperpixelSLIC.getLabelContourMask(opencv_core.Mat image) |
void |
opencv_ximgproc.SuperpixelLSC.getLabelContourMask(opencv_core.Mat image) |
void |
opencv_ximgproc.SuperpixelSEEDS.getLabelContourMask(opencv_core.Mat image,
boolean thick_line)
\brief Returns the mask of the superpixel segmentation stored in SuperpixelSEEDS object.
|
void |
opencv_ximgproc.SuperpixelSLIC.getLabelContourMask(opencv_core.Mat image,
boolean thick_line)
\brief Returns the mask of the superpixel segmentation stored in SuperpixelSLIC object.
|
void |
opencv_ximgproc.SuperpixelLSC.getLabelContourMask(opencv_core.Mat image,
boolean thick_line)
\brief Returns the mask of the superpixel segmentation stored in SuperpixelLSC object.
|
void |
opencv_ximgproc.SuperpixelSEEDS.getLabels(opencv_core.Mat labels_out)
\brief Returns the segmentation labeling of the image.
|
void |
opencv_ximgproc.SuperpixelSLIC.getLabels(opencv_core.Mat labels_out)
\brief Returns the segmentation labeling of the image.
|
void |
opencv_ximgproc.SuperpixelLSC.getLabels(opencv_core.Mat labels_out)
\brief Returns the segmentation labeling of the image.
|
void |
opencv_bioinspired.Retina.getMagno(opencv_core.Mat retinaOutput_magno)
\brief Accessor of the motion channel of the retina (models peripheral vision).
|
void |
opencv_bioinspired.Retina.getMagnoRAW(opencv_core.Mat retinaOutput_magno)
\brief Accessor of the motion channel of the retina (models peripheral vision).
|
void |
opencv_bgsegm.SyntheticSequenceGenerator.getNextFrame(opencv_core.Mat frame,
opencv_core.Mat gtMask)
\brief Obtain the next frame in the sequence.
|
void |
opencv_ml.TrainData.getNormCatValues(int vi,
opencv_core.Mat sidx,
int[] values) |
void |
opencv_ml.TrainData.getNormCatValues(int vi,
opencv_core.Mat sidx,
IntBuffer values) |
void |
opencv_ml.TrainData.getNormCatValues(int vi,
opencv_core.Mat sidx,
org.bytedeco.javacpp.IntPointer values) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha) |
static opencv_core.Mat |
opencv_calib3d.getOptimalNewCameraMatrix(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect validPixROI,
boolean centerPrincipalPoint)
\brief Returns the new camera matrix based on the free scaling parameter.
|
void |
opencv_bioinspired.Retina.getParvo(opencv_core.Mat retinaOutput_parvo)
\brief Accessor of the details channel of the retina (models foveal vision).
|
void |
opencv_bioinspired.Retina.getParvoRAW(opencv_core.Mat retinaOutput_parvo)
\brief Accessor of the details channel of the retina (models foveal vision).
|
static opencv_core.Mat |
opencv_imgproc.getPerspectiveTransform(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Calculates a perspective transform from four pairs of the corresponding points.
|
opencv_core.Mat |
opencv_img_hash.RadialVarianceHash.getPixPerLine(opencv_core.Mat input) |
static opencv_core.Mat |
opencv_dnn.getPlane(opencv_core.Mat m,
int n,
int cn) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.Mat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.Mat patch) |
static void |
opencv_imgproc.getRectSubPix(opencv_core.Mat image,
opencv_core.Size patchSize,
opencv_core.Point2f center,
opencv_core.Mat patch,
int patchType)
\brief Retrieves a pixel rectangle from an image with sub-pixel accuracy.
|
void |
opencv_ml.TrainData.getSample(opencv_core.Mat varIdx,
int sidx,
float[] buf) |
void |
opencv_ml.TrainData.getSample(opencv_core.Mat varIdx,
int sidx,
FloatBuffer buf) |
void |
opencv_ml.TrainData.getSample(opencv_core.Mat varIdx,
int sidx,
org.bytedeco.javacpp.FloatPointer buf) |
void |
opencv_dnn.Layer.getScaleShift(opencv_core.Mat scale,
opencv_core.Mat shift)
\brief Returns parameters of layers with channel-wise multiplication and addition.
|
void |
opencv_bioinspired.TransientAreasSegmentationModule.getSegmentationPicture(opencv_core.Mat transientAreas)
\brief access function
|
static opencv_core.Mat |
opencv_ml.TrainData.getSubMatrix(opencv_core.Mat matrix,
opencv_core.Mat idx,
int layout)
\brief Extract from matrix rows/cols specified by passed indexes.
|
static opencv_core.Mat |
opencv_ml.TrainData.getSubVector(opencv_core.Mat vec,
opencv_core.Mat idx)
\brief Extract from 1D vector elements specified by passed indexes.
|
void |
opencv_ml.TrainData.getValues(int vi,
opencv_core.Mat sidx,
float[] values) |
void |
opencv_ml.TrainData.getValues(int vi,
opencv_core.Mat sidx,
FloatBuffer values) |
void |
opencv_ml.TrainData.getValues(int vi,
opencv_core.Mat sidx,
org.bytedeco.javacpp.FloatPointer values) |
void |
opencv_ml.RTrees.getVotes(opencv_core.Mat samples,
opencv_core.Mat results,
int flags)
Returns the result of each individual tree in the forest.
|
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.Mat image,
opencv_core.Mat corners,
int maxCorners,
double qualityLevel,
double minDistance) |
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.Mat image,
opencv_core.Mat corners,
int maxCorners,
double qualityLevel,
double minDistance,
opencv_core.Mat mask,
int blockSize,
boolean useHarrisDetector,
double k)
\brief Determines strong corners on an image.
|
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.Mat image,
opencv_core.Mat corners,
int maxCorners,
double qualityLevel,
double minDistance,
opencv_core.Mat mask,
int blockSize,
int gradientSize) |
static void |
opencv_imgproc.goodFeaturesToTrack(opencv_core.Mat image,
opencv_core.Mat corners,
int maxCorners,
double qualityLevel,
double minDistance,
opencv_core.Mat mask,
int blockSize,
int gradientSize,
boolean useHarrisDetector,
double k) |
static void |
opencv_imgproc.grabCut(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Rect rect,
opencv_core.Mat bgdModel,
opencv_core.Mat fgdModel,
int iterCount) |
static void |
opencv_imgproc.grabCut(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Rect rect,
opencv_core.Mat bgdModel,
opencv_core.Mat fgdModel,
int iterCount,
int mode)
\brief Runs the GrabCut algorithm.
|
static opencv_core.MatExpr |
opencv_core.greaterThan(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.greaterThan(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.greaterThan(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.greaterThanEquals(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.greaterThanEquals(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.greaterThanEquals(opencv_core.Mat a,
opencv_core.Mat b) |
static void |
opencv_ximgproc.guidedFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
int radius,
double eps) |
static void |
opencv_ximgproc.guidedFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
int radius,
double eps,
int dDepth)
\brief Simple one-line Guided Filter call.
|
opencv_stitching.MatchesInfo |
opencv_stitching.MatchesInfo.H(opencv_core.Mat H) |
static void |
opencv_core.hconcat(opencv_core.GpuMatVector src,
opencv_core.Mat dst) |
static void |
opencv_core.hconcat(opencv_core.Mat src,
long nsrc,
opencv_core.GpuMat dst) |
static void |
opencv_core.hconcat(opencv_core.Mat src,
long nsrc,
opencv_core.Mat dst)
\brief Applies horizontal concatenation to given matrices.
|
static void |
opencv_core.hconcat(opencv_core.Mat src,
long nsrc,
opencv_core.UMat dst) |
static void |
opencv_core.hconcat(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
\overload
|
static void |
opencv_core.hconcat(opencv_core.MatVector src,
opencv_core.Mat dst)
\overload
|
static void |
opencv_core.hconcat(opencv_core.UMatVector src,
opencv_core.Mat dst) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.GpuMat hist,
int[] histSize,
int[] lowerLevel,
int[] upperLevel) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.GpuMat hist,
int[] histSize,
int[] lowerLevel,
int[] upperLevel,
opencv_core.Stream stream) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.GpuMat hist,
IntBuffer histSize,
IntBuffer lowerLevel,
IntBuffer upperLevel) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.GpuMat hist,
IntBuffer histSize,
IntBuffer lowerLevel,
IntBuffer upperLevel,
opencv_core.Stream stream) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.GpuMat hist,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.IntPointer lowerLevel,
org.bytedeco.javacpp.IntPointer upperLevel) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.GpuMat hist,
org.bytedeco.javacpp.IntPointer histSize,
org.bytedeco.javacpp.IntPointer lowerLevel,
org.bytedeco.javacpp.IntPointer upperLevel,
opencv_core.Stream stream)
\overload
|
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.Mat hist,
int histSize,
int lowerLevel,
int upperLevel) |
static void |
opencv_cudaimgproc.histEven(opencv_core.Mat src,
opencv_core.Mat hist,
int histSize,
int lowerLevel,
int upperLevel,
opencv_core.Stream stream)
\brief Calculates a histogram with evenly distributed bins.
|
static void |
opencv_cudaimgproc.histRange(opencv_core.Mat src,
opencv_core.GpuMat hist,
opencv_core.GpuMat levels) |
static void |
opencv_cudaimgproc.histRange(opencv_core.Mat src,
opencv_core.GpuMat hist,
opencv_core.GpuMat levels,
opencv_core.Stream stream)
\overload
|
static void |
opencv_cudaimgproc.histRange(opencv_core.Mat src,
opencv_core.Mat hist,
opencv_core.Mat levels) |
static void |
opencv_cudaimgproc.histRange(opencv_core.Mat src,
opencv_core.Mat hist,
opencv_core.Mat levels,
opencv_core.Stream stream)
\brief Calculates a histogram with bins determined by the levels array.
|
static void |
opencv_imgproc.HoughCircles(opencv_core.Mat image,
opencv_core.Mat circles,
int method,
double dp,
double minDist) |
static void |
opencv_imgproc.HoughCircles(opencv_core.Mat image,
opencv_core.Mat circles,
int method,
double dp,
double minDist,
double param1,
double param2,
int minRadius,
int maxRadius)
\brief Finds circles in a grayscale image using the Hough transform.
|
static void |
opencv_imgproc.HoughLines(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold) |
static void |
opencv_imgproc.HoughLines(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold,
double srn,
double stn,
double min_theta,
double max_theta)
\brief Finds lines in a binary image using the standard Hough transform.
|
static void |
opencv_imgproc.HoughLinesP(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold) |
static void |
opencv_imgproc.HoughLinesP(opencv_core.Mat image,
opencv_core.Mat lines,
double rho,
double theta,
int threshold,
double minLineLength,
double maxLineGap)
\brief Finds line segments in a binary image using the probabilistic Hough transform.
|
static void |
opencv_imgproc.HoughLinesPointSet(opencv_core.Mat _point,
opencv_core.Mat _lines,
int lines_max,
int threshold,
double min_rho,
double max_rho,
double rho_step,
double min_theta,
double max_theta,
double theta_step)
\brief Finds lines in a set of points using the standard Hough transform.
|
static opencv_core.Scalar4i |
opencv_ximgproc.HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.Mat srcImgInfo) |
static opencv_core.Scalar4i |
opencv_ximgproc.HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.Mat srcImgInfo,
int angleRange,
int makeSkew,
int rules)
\brief Calculates coordinates of line segment corresponded by point in Hough space.
|
static void |
opencv_imgproc.HuMoments(opencv_core.Moments m,
opencv_core.Mat hu)
\overload
|
static void |
opencv_core.idct(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.idct(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
\brief Calculates the inverse Discrete Cosine Transform of a 1D or 2D array.
|
boolean |
opencv_aruco.Dictionary.identify(opencv_core.Mat onlyBits,
int[] idx,
int[] rotation,
double maxCorrectionRate) |
boolean |
opencv_aruco.Dictionary.identify(opencv_core.Mat onlyBits,
IntBuffer idx,
IntBuffer rotation,
double maxCorrectionRate) |
boolean |
opencv_aruco.Dictionary.identify(opencv_core.Mat onlyBits,
org.bytedeco.javacpp.IntPointer idx,
org.bytedeco.javacpp.IntPointer rotation,
double maxCorrectionRate)
\brief Given a matrix of bits, determines whether it corresponds to a marker in the dictionary.
|
static void |
opencv_core.idft(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.idft(opencv_core.Mat src,
opencv_core.Mat dst,
int flags,
int nonzeroRows)
\brief Calculates the inverse Discrete Fourier Transform of a 1D or 2D array.
|
static void |
opencv_photo.illuminationChange(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst) |
static void |
opencv_photo.illuminationChange(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst,
float alpha,
float beta)
\brief Applying an appropriate non-linear transformation to the gradient field inside the selection and
then integrating back with a Poisson solver, modifies locally the apparent illumination of an image.
|
static void |
opencv_dnn.imagesFromBlob(opencv_core.Mat blob_,
opencv_core.GpuMatVector images_) |
static void |
opencv_dnn.imagesFromBlob(opencv_core.Mat blob_,
opencv_core.MatVector images_)
\brief Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure
(std::vector&lt;Mat&gt;).
|
static void |
opencv_dnn.imagesFromBlob(opencv_core.Mat blob_,
opencv_core.UMatVector images_) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.GpuMat buf,
int flags,
opencv_core.Mat dst) |
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.Mat buf,
int flags)
\brief Reads an image from a buffer in memory.
|
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.Mat buf,
int flags,
opencv_core.Mat dst)
\overload
|
static opencv_core.Mat |
opencv_imgcodecs.imdecode(opencv_core.UMat buf,
int flags,
opencv_core.Mat dst) |
static boolean |
opencv_imgcodecs.imencode(org.bytedeco.javacpp.BytePointer ext,
opencv_core.Mat img,
byte[] buf) |
static boolean |
opencv_imgcodecs.imencode(org.bytedeco.javacpp.BytePointer ext,
opencv_core.Mat img,
byte[] buf,
int[] params) |
static boolean |
opencv_imgcodecs.imencode(org.bytedeco.javacpp.BytePointer ext,
opencv_core.Mat img,
ByteBuffer buf) |
static boolean |
opencv_imgcodecs.imencode(org.bytedeco.javacpp.BytePointer ext,
opencv_core.Mat img,
ByteBuffer buf,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imencode(org.bytedeco.javacpp.BytePointer ext,
opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer buf) |
static boolean |
opencv_imgcodecs.imencode(org.bytedeco.javacpp.BytePointer ext,
opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer buf,
org.bytedeco.javacpp.IntPointer params)
\brief Encodes an image into a memory buffer.
|
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.Mat img,
byte[] buf) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.Mat img,
byte[] buf,
int[] params) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.Mat img,
ByteBuffer buf) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.Mat img,
ByteBuffer buf,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer buf) |
static boolean |
opencv_imgcodecs.imencode(String ext,
opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer buf,
org.bytedeco.javacpp.IntPointer params) |
static void |
opencv_highgui.imshow(org.bytedeco.javacpp.BytePointer winname,
opencv_core.Mat mat)
\brief Displays an image in the specified window.
|
static void |
opencv_highgui.imshow(String winname,
opencv_core.Mat mat) |
static boolean |
opencv_imgcodecs.imwrite(org.bytedeco.javacpp.BytePointer filename,
opencv_core.Mat img) |
static boolean |
opencv_imgcodecs.imwrite(org.bytedeco.javacpp.BytePointer filename,
opencv_core.Mat img,
int[] params) |
static boolean |
opencv_imgcodecs.imwrite(org.bytedeco.javacpp.BytePointer filename,
opencv_core.Mat img,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imwrite(org.bytedeco.javacpp.BytePointer filename,
opencv_core.Mat img,
org.bytedeco.javacpp.IntPointer params)
\brief Saves an image to a specified file.
|
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.Mat img) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.Mat img,
int[] params) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.Mat img,
IntBuffer params) |
static boolean |
opencv_imgcodecs.imwrite(String filename,
opencv_core.Mat img,
org.bytedeco.javacpp.IntPointer params) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
byte[] ptrs) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
byte[] ptrs,
int narrays) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
ByteBuffer ptrs) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
ByteBuffer ptrs,
int narrays) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
org.bytedeco.javacpp.BytePointer ptrs) |
void |
opencv_core.NAryMatIterator.init(opencv_core.Mat arrays,
opencv_core.Mat planes,
org.bytedeco.javacpp.BytePointer ptrs,
int narrays) |
boolean |
opencv_tracking.Tracker.init(opencv_core.Mat image,
opencv_core.Rect2d boundingBox)
\brief Initialize the tracker with a known bounding box that surrounded the target
|
void |
opencv_core.NAryMatIterator.init(org.bytedeco.javacpp.PointerPointer arrays,
opencv_core.Mat planes,
org.bytedeco.javacpp.PointerPointer ptrs,
int narrays)
the separate iterator initialization method
|
void |
opencv_objdetect.BaseCascadeClassifier.MaskGenerator.initializeMask(opencv_core.Mat arg0) |
static void |
opencv_calib3d.initUndistortRectifyMap(opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Mat R,
opencv_core.Mat P,
opencv_core.Size size,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2)
\brief Computes undistortion and rectification maps for image transform by cv::remap().
|
static void |
opencv_imgproc.initUndistortRectifyMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat R,
opencv_core.Mat newCameraMatrix,
opencv_core.Size size,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2)
\brief Computes the undistortion and rectification transformation map.
|
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2) |
static float |
opencv_imgproc.initWideAngleProjMap(opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Size imageSize,
int destImageWidth,
int m1type,
opencv_core.Mat map1,
opencv_core.Mat map2,
int projType,
double alpha)
initializes maps for #remap for wide-angle
|
void |
opencv_videostab.InpainterBase.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.NullInpainter.inpaint(int arg0,
opencv_core.Mat arg1,
opencv_core.Mat arg2) |
void |
opencv_videostab.InpaintingPipeline.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.ConsistentMosaicInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.MotionInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.ColorAverageInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
void |
opencv_videostab.ColorInpainter.inpaint(int idx,
opencv_core.Mat frame,
opencv_core.Mat mask) |
static void |
opencv_photo.inpaint(opencv_core.Mat src,
opencv_core.Mat inpaintMask,
opencv_core.Mat dst,
double inpaintRadius,
int flags)
\brief Restores the selected region in an image using the region neighborhood.
|
static void |
opencv_xphoto.inpaint(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst,
int algorithmType)
\brief The function implements different single-image inpainting algorithms.
|
static void |
opencv_core.inRange(opencv_core.Mat src,
opencv_core.Mat lowerb,
opencv_core.Mat upperb,
opencv_core.Mat dst)
\brief Checks if array elements lie between the elements of two other arrays.
|
opencv_core.MatVector.Iterator |
opencv_core.MatVector.insert(opencv_core.MatVector.Iterator pos,
opencv_core.Mat value) |
opencv_dnn.MatPointerVector.Iterator |
opencv_dnn.MatPointerVector.insert(opencv_dnn.MatPointerVector.Iterator pos,
opencv_core.Mat value) |
static void |
opencv_core.insertChannel(opencv_core.Mat src,
opencv_core.Mat dst,
int coi)
\brief Inserts a single channel to dst (coi is 0-based index)
|
static void |
opencv_core.insertImageCOI(opencv_core.Mat coiimg,
opencv_core.CvArr arr) |
static void |
opencv_core.insertImageCOI(opencv_core.Mat coiimg,
opencv_core.CvArr arr,
int coi)
inserts single-channel cv::Mat into a multi-channel CvMat or IplImage
|
static void |
opencv_cudaarithm.integral(opencv_core.Mat src,
opencv_core.Mat sum) |
static void |
opencv_imgproc.integral(opencv_core.Mat src,
opencv_core.Mat sum) |
static void |
opencv_imgproc.integral(opencv_core.Mat src,
opencv_core.Mat sum,
int sdepth)
\} imgproc_transform
|
static void |
opencv_cudaarithm.integral(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Stream stream)
\brief Computes an integral image.
|
static void |
opencv_imgproc.integral2(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum) |
static void |
opencv_imgproc.integral2(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum,
int sdepth,
int sqdepth)
\overload
|
static void |
opencv_imgproc.integral3(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum,
opencv_core.Mat tilted) |
static void |
opencv_imgproc.integral3(opencv_core.Mat src,
opencv_core.Mat sum,
opencv_core.Mat sqsum,
opencv_core.Mat tilted,
int sdepth,
int sqdepth)
\brief Calculates the integral of an image.
|
void |
opencv_ximgproc.SparseMatchInterpolator.interpolate(opencv_core.Mat from_image,
opencv_core.Mat from_points,
opencv_core.Mat to_image,
opencv_core.Mat to_points,
opencv_core.Mat dense_flow)
\brief Interpolate input sparse matches.
|
static int |
opencv_aruco.interpolateCornersCharuco(opencv_core.GpuMatVector markerCorners,
opencv_core.Mat markerIds,
opencv_core.Mat image,
opencv_aruco.CharucoBoard board,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds) |
static int |
opencv_aruco.interpolateCornersCharuco(opencv_core.GpuMatVector markerCorners,
opencv_core.Mat markerIds,
opencv_core.Mat image,
opencv_aruco.CharucoBoard board,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
int minMarkers) |
static int |
opencv_aruco.interpolateCornersCharuco(opencv_core.MatVector markerCorners,
opencv_core.Mat markerIds,
opencv_core.Mat image,
opencv_aruco.CharucoBoard board,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds) |
static int |
opencv_aruco.interpolateCornersCharuco(opencv_core.MatVector markerCorners,
opencv_core.Mat markerIds,
opencv_core.Mat image,
opencv_aruco.CharucoBoard board,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
int minMarkers)
\brief Interpolate position of ChArUco board corners
|
static int |
opencv_aruco.interpolateCornersCharuco(opencv_core.UMatVector markerCorners,
opencv_core.Mat markerIds,
opencv_core.Mat image,
opencv_aruco.CharucoBoard board,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds) |
static int |
opencv_aruco.interpolateCornersCharuco(opencv_core.UMatVector markerCorners,
opencv_core.Mat markerIds,
opencv_core.Mat image,
opencv_aruco.CharucoBoard board,
opencv_core.Mat charucoCorners,
opencv_core.Mat charucoIds,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
int minMarkers) |
static float |
opencv_imgproc.intersectConvexConvex(opencv_core.Mat _p1,
opencv_core.Mat _p2,
opencv_core.Mat _p12) |
static float |
opencv_imgproc.intersectConvexConvex(opencv_core.Mat _p1,
opencv_core.Mat _p2,
opencv_core.Mat _p12,
boolean handleNested)
finds intersection of two convex polygons
|
static double |
opencv_core.invert(opencv_core.Mat src,
opencv_core.Mat dst) |
static double |
opencv_core.invert(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
\brief Finds the inverse or pseudo-inverse of a matrix.
|
static void |
opencv_imgproc.invertAffineTransform(opencv_core.Mat M,
opencv_core.Mat iM)
\brief Inverts an affine transformation.
|
static boolean |
opencv_imgproc.isContourConvex(opencv_core.Mat contour)
\brief Tests a contour convexity.
|
void |
opencv_ximgproc.SuperpixelSEEDS.iterate(opencv_core.Mat img) |
void |
opencv_ximgproc.SuperpixelSEEDS.iterate(opencv_core.Mat img,
int num_iterations)
\brief Calculates the superpixel segmentation on a given image with the initialized
parameters in the SuperpixelSEEDS object.
|
static void |
opencv_ximgproc.jointBilateralFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
opencv_ximgproc.jointBilateralFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
\brief Applies the joint bilateral filter to an image.
|
static org.bytedeco.javacpp.BytePointer |
opencv_core.kernelToStr(opencv_core.Mat _kernel) |
static org.bytedeco.javacpp.BytePointer |
opencv_core.kernelToStr(opencv_core.Mat _kernel,
int ddepth,
org.bytedeco.javacpp.BytePointer name) |
static String |
opencv_core.kernelToStr(opencv_core.Mat _kernel,
int ddepth,
String name) |
static double |
opencv_core.kmeans(opencv_core.Mat data,
int K,
opencv_core.Mat bestLabels,
opencv_core.TermCriteria criteria,
int attempts,
int flags) |
static double |
opencv_core.kmeans(opencv_core.Mat data,
int K,
opencv_core.Mat bestLabels,
opencv_core.TermCriteria criteria,
int attempts,
int flags,
opencv_core.Mat centers)
\brief Finds centers of clusters and groups input samples around the clusters.
|
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.GpuMatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.MatVector masks,
boolean compactResult)
\overload
|
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.UMatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_core.DMatchVectorVector matches,
int k) |
void |
opencv_features2d.DescriptorMatcher.knnMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_core.DMatchVectorVector matches,
int k,
opencv_core.Mat mask,
boolean compactResult)
\brief Finds the k best matches for each descriptor from a query set.
|
void |
opencv_flann.Index.knnSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
int knn) |
void |
opencv_flann.Index.knnSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
int knn,
opencv_flann.SearchParams params) |
static void |
opencv_ximgproc.l0Smooth(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_ximgproc.l0Smooth(opencv_core.Mat src,
opencv_core.Mat dst,
double lambda,
double kappa)
\brief Global image smoothing via L0 gradient minimization.
|
static void |
opencv_imgproc.Laplacian(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth) |
static void |
opencv_imgproc.Laplacian(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int ksize,
double scale,
double delta,
int borderType)
\brief Calculates the Laplacian of an image.
|
static opencv_core.MatExpr |
opencv_core.lessThan(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.lessThan(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.lessThan(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.lessThanEquals(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.lessThanEquals(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.lessThanEquals(opencv_core.Mat a,
opencv_core.Mat b) |
static void |
opencv_imgproc.line(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_imgproc.line(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws a line segment connecting two points.
|
static void |
opencv_imgproc.linearPolar(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Point2f center,
double maxRadius,
int flags)
Deprecated.
This function produces the same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags)
\internal Transform the source image using the following transformation (See \ref polar_remaps_reference_image "Polar remaps reference image c)"): \f[\begin{array}{l} dst( \rho , \phi ) = src(x,y) \\ dst.size() \leftarrow src.size() \end{array}\f] where \f[\begin{array}{l} I = (dx,dy) = (x - center.x,y - center.y) \\ \rho = Kmag \cdot \texttt{magnitude} (I) ,\\ \phi = Kangle \cdot \texttt{angle} (I) \end{array}\f] and \f[\begin{array}{l} Kmag = src.cols / maxRadius \\ Kangle = src.rows / 2\Pi \end{array}\f]
|
boolean |
opencv_flann.Index.load(opencv_core.Mat features,
org.bytedeco.javacpp.BytePointer filename) |
boolean |
opencv_flann.Index.load(opencv_core.Mat features,
String filename) |
static void |
opencv_core.log(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Calculates the natural logarithm of every array element.
|
static void |
opencv_cudaarithm.log(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.log(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes a natural logarithm of absolute value of each matrix element.
|
static void |
opencv_imgproc.logPolar(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Point2f center,
double M,
int flags)
Deprecated.
This function produces the same result as cv::warpPolar(src, dst, src.size(), center, maxRadius, flags+WARP_POLAR_LOG);
\internal Transform the source image using the following transformation (See \ref polar_remaps_reference_image "Polar remaps reference image d)"): \f[\begin{array}{l} dst( \rho , \phi ) = src(x,y) \\ dst.size() \leftarrow src.size() \end{array}\f] where \f[\begin{array}{l} I = (dx,dy) = (x - center.x,y - center.y) \\ \rho = M \cdot log_e(\texttt{magnitude} (I)) ,\\ \phi = Kangle \cdot \texttt{angle} (I) \\ \end{array}\f] and \f[\begin{array}{l} M = src.cols / log_e(maxRadius) \\ Kangle = src.rows / 2\Pi \\ \end{array}\f] The function emulates the human "foveal" vision and can be used for fast scale and rotation-invariant template matching, for object tracking and so forth. |
static void |
opencv_cudaarithm.lshift(opencv_core.Mat src,
opencv_core.Scalar4i val,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.lshift(opencv_core.Mat src,
opencv_core.Scalar4i val,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Performs a pixel-by-pixel left shift of an image by a constant value.
|
static void |
opencv_core.LUT(opencv_core.Mat src,
opencv_core.Mat lut,
opencv_core.Mat dst)
\brief Performs a look-up table transform of an array.
|
static void |
opencv_cudaarithm.magnitude(opencv_core.Mat xy,
opencv_core.Mat magnitude) |
static void |
opencv_core.magnitude(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude)
\brief Calculates the magnitude of 2D vectors.
|
static void |
opencv_cudaarithm.magnitude(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude) |
static void |
opencv_cudaarithm.magnitude(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Stream stream)
\overload
computes the magnitude of each (x(i), y(i)) vector;
supports only floating-point sources
|
static void |
opencv_cudaarithm.magnitude(opencv_core.Mat xy,
opencv_core.Mat magnitude,
opencv_core.Stream stream)
\brief Computes magnitudes of complex matrix elements.
|
static void |
opencv_cudaarithm.magnitudeSqr(opencv_core.Mat xy,
opencv_core.Mat magnitude) |
static void |
opencv_cudaarithm.magnitudeSqr(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude) |
static void |
opencv_cudaarithm.magnitudeSqr(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat magnitude,
opencv_core.Stream stream)
\overload
computes the squared magnitude of each (x(i), y(i)) vector;
supports only floating-point sources
|
static void |
opencv_cudaarithm.magnitudeSqr(opencv_core.Mat xy,
opencv_core.Mat magnitude,
opencv_core.Stream stream)
\brief Computes squared magnitudes of complex matrix elements.
|
static double |
opencv_core.Mahalanobis(opencv_core.Mat v1,
opencv_core.Mat v2,
opencv_core.Mat icovar)
\brief Calculates the Mahalanobis distance between two vectors.
|
static void |
opencv_img_hash.marrHildrethHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr) |
static void |
opencv_img_hash.marrHildrethHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr,
float alpha,
float scale)
\brief Computes the Marr-Hildreth hash value of the input image
|
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVector matches) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVector matches,
opencv_core.GpuMatVector masks) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVector matches,
opencv_core.MatVector masks)
\overload
|
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVector matches,
opencv_core.UMatVector masks) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_core.DMatchVector matches) |
void |
opencv_features2d.DescriptorMatcher.match(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_core.DMatchVector matches,
opencv_core.Mat mask)
\brief Finds the best match for each descriptor from a query set.
|
void |
opencv_cudaimgproc.TemplateMatching.match(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result) |
void |
opencv_cudaimgproc.TemplateMatching.match(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result,
opencv_core.Stream stream)
\brief Computes a proximity map for a raster template and an image where the template is searched for.
|
static double |
opencv_imgproc.matchShapes(opencv_core.Mat contour1,
opencv_core.Mat contour2,
int method,
double parameter)
\brief Compares two shapes.
|
static void |
opencv_imgproc.matchTemplate(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result,
int method) |
static void |
opencv_imgproc.matchTemplate(opencv_core.Mat image,
opencv_core.Mat templ,
opencv_core.Mat result,
int method,
opencv_core.Mat mask)
\brief Compares a template against overlapped image regions.
|
static void |
opencv_calib3d.matMulDeriv(opencv_core.Mat A,
opencv_core.Mat B,
opencv_core.Mat dABdA,
opencv_core.Mat dABdB)
\brief Computes partial derivatives of the matrix product for each multiplied matrix.
|
static opencv_core.MatExpr |
opencv_core.max(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.max(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.max(opencv_core.Mat a,
opencv_core.Mat b) |
static void |
opencv_core.max(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
\overload
needed to avoid conflicts with const _Tp& std::max(const _Tp&, const _Tp&, _Compare)
|
static void |
opencv_cudaarithm.max(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.max(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes the per-element maximum of two matrices (or a matrix and a scalar).
|
static opencv_core.Scalar |
opencv_core.mean(opencv_core.Mat src) |
opencv_core.PCA |
opencv_core.PCA.mean(opencv_core.Mat mean) |
static opencv_core.Scalar |
opencv_core.mean(opencv_core.Mat src,
opencv_core.Mat mask)
\brief Calculates an average (mean) of array elements.
|
static int |
opencv_video.meanShift(opencv_core.Mat probImage,
opencv_core.Rect window,
opencv_core.TermCriteria criteria)
\brief Finds an object on a back projection image.
|
static void |
opencv_cudaimgproc.meanShiftFiltering(opencv_core.Mat src,
opencv_core.Mat dst,
int sp,
int sr) |
static void |
opencv_cudaimgproc.meanShiftFiltering(opencv_core.Mat src,
opencv_core.Mat dst,
int sp,
int sr,
opencv_core.TermCriteria criteria,
opencv_core.Stream stream)
\} cudaimgproc_feature
|
static void |
opencv_cudaimgproc.meanShiftProc(opencv_core.Mat src,
opencv_core.Mat dstr,
opencv_core.Mat dstsp,
int sp,
int sr) |
static void |
opencv_cudaimgproc.meanShiftProc(opencv_core.Mat src,
opencv_core.Mat dstr,
opencv_core.Mat dstsp,
int sp,
int sr,
opencv_core.TermCriteria criteria,
opencv_core.Stream stream)
\brief Performs a mean-shift procedure and stores information about processed points (their colors and
positions) in two images.
|
static void |
opencv_cudaimgproc.meanShiftSegmentation(opencv_core.Mat src,
opencv_core.Mat dst,
int sp,
int sr,
int minsize) |
static void |
opencv_cudaimgproc.meanShiftSegmentation(opencv_core.Mat src,
opencv_core.Mat dst,
int sp,
int sr,
int minsize,
opencv_core.TermCriteria criteria,
opencv_core.Stream stream)
\brief Performs a mean-shift segmentation of the source image and eliminates small segments.
|
static void |
opencv_cudaarithm.meanStdDev(opencv_core.Mat mtx,
opencv_core.Mat dst) |
static void |
opencv_core.meanStdDev(opencv_core.Mat src,
opencv_core.Mat mean,
opencv_core.Mat stddev) |
static void |
opencv_core.meanStdDev(opencv_core.Mat src,
opencv_core.Mat mean,
opencv_core.Mat stddev,
opencv_core.Mat mask)
Calculates a mean and standard deviation of array elements.
|
static void |
opencv_cudaarithm.meanStdDev(opencv_core.Mat mtx,
opencv_core.Mat dst,
opencv_core.Stream stream)
\overload
|
static void |
opencv_cudaarithm.meanStdDev(opencv_core.Mat mtx,
opencv_core.Scalar mean,
opencv_core.Scalar stddev)
\brief Computes a mean value and a standard deviation of matrix elements.
|
void |
opencv_tracking.UkfSystemModel.measurementFunction(opencv_core.Mat x_k,
opencv_core.Mat n_k,
opencv_core.Mat z_k)
The function for computing the measurement from the state
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.measurementMatrix(opencv_core.Mat measurementMatrix) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.measurementNoiseCov(opencv_core.Mat measurementNoiseCov) |
opencv_tracking.UnscentedKalmanFilterParams |
opencv_tracking.UnscentedKalmanFilterParams.measurementNoiseCov(opencv_core.Mat measurementNoiseCov) |
static void |
opencv_imgproc.medianBlur(opencv_core.Mat src,
opencv_core.Mat dst,
int ksize)
\brief Blurs an image using the median filter.
|
static void |
opencv_cudaarithm.merge(opencv_core.GpuMat src,
long n,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.merge(opencv_core.GpuMat src,
long n,
opencv_core.Mat dst,
opencv_core.Stream stream)
\} cudaarithm_elem
|
static void |
opencv_core.merge(opencv_core.GpuMatVector mv,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.merge(opencv_core.GpuMatVector src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.merge(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\overload
|
static void |
opencv_core.merge(opencv_core.Mat mv,
long count,
opencv_core.GpuMat dst) |
static void |
opencv_core.merge(opencv_core.Mat mv,
long count,
opencv_core.Mat dst)
\brief Creates one multi-channel array out of several single-channel ones.
|
static void |
opencv_core.merge(opencv_core.Mat mv,
long count,
opencv_core.UMat dst) |
static void |
opencv_core.merge(opencv_core.MatVector mv,
opencv_core.Mat dst)
\overload
|
static void |
opencv_core.merge(opencv_core.UMatVector mv,
opencv_core.Mat dst) |
static opencv_core.MatExpr |
opencv_core.min(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.min(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.min(opencv_core.Mat a,
opencv_core.Mat b) |
static void |
opencv_core.min(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
\overload
needed to avoid conflicts with const _Tp& std::min(const _Tp&, const _Tp&, _Compare)
|
static void |
opencv_cudaarithm.min(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.min(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes the per-element minimum of two matrices (or a matrix and a scalar).
|
static opencv_core.RotatedRect |
opencv_imgproc.minAreaRect(opencv_core.Mat points)
\brief Finds a rotated rectangle of the minimum area enclosing the input 2D point set.
|
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.Mat points,
opencv_core.Point2f center,
float[] radius) |
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.Mat points,
opencv_core.Point2f center,
FloatBuffer radius) |
static void |
opencv_imgproc.minEnclosingCircle(opencv_core.Mat points,
opencv_core.Point2f center,
org.bytedeco.javacpp.FloatPointer radius)
\brief Finds a circle of the minimum area enclosing a 2D point set.
|
static double |
opencv_imgproc.minEnclosingTriangle(opencv_core.Mat points,
opencv_core.Mat triangle)
\brief Finds a triangle of minimum area enclosing a 2D point set and returns its area.
|
double |
opencv_core.MinProblemSolver.minimize(opencv_core.Mat x)
\brief actually runs the algorithm and performs the minimization.
|
static void |
opencv_cudaarithm.minMax(opencv_core.Mat src,
double[] minVal,
double[] maxVal) |
static void |
opencv_cudaarithm.minMax(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
opencv_core.Mat mask) |
static void |
opencv_cudaarithm.minMax(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal) |
static void |
opencv_cudaarithm.minMax(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
opencv_core.Mat mask) |
static void |
opencv_cudaarithm.minMax(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal,
org.bytedeco.javacpp.DoublePointer maxVal) |
static void |
opencv_cudaarithm.minMax(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal,
org.bytedeco.javacpp.DoublePointer maxVal,
opencv_core.Mat mask)
\brief Finds global minimum and maximum matrix elements and returns their values.
|
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
double[] minVal) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
int[] minIdx,
int[] maxIdx,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
DoubleBuffer minVal) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
IntBuffer minIdx,
IntBuffer maxIdx,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal) |
static void |
opencv_core.minMaxIdx(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal,
org.bytedeco.javacpp.DoublePointer maxVal,
org.bytedeco.javacpp.IntPointer minIdx,
org.bytedeco.javacpp.IntPointer maxIdx,
opencv_core.Mat mask)
\brief Finds the global minimum and maximum in an array
|
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
double[] minVal) |
static void |
opencv_cudaarithm.minMaxLoc(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_cudaarithm.minMaxLoc(opencv_core.Mat src,
double[] minVal,
double[] maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal) |
static void |
opencv_cudaarithm.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_cudaarithm.minMaxLoc(opencv_core.Mat src,
DoubleBuffer minVal,
DoubleBuffer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal) |
static void |
opencv_cudaarithm.minMaxLoc(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal,
org.bytedeco.javacpp.DoublePointer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc) |
static void |
opencv_core.minMaxLoc(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal,
org.bytedeco.javacpp.DoublePointer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask)
\brief Finds the global minimum and maximum in an array.
|
static void |
opencv_cudaarithm.minMaxLoc(opencv_core.Mat src,
org.bytedeco.javacpp.DoublePointer minVal,
org.bytedeco.javacpp.DoublePointer maxVal,
opencv_core.Point minLoc,
opencv_core.Point maxLoc,
opencv_core.Mat mask)
\brief Finds global minimum and maximum matrix elements and returns their values with locations.
|
static void |
opencv_core.mixChannels(opencv_core.Mat src,
long nsrcs,
opencv_core.Mat dst,
long ndsts,
int[] fromTo,
long npairs) |
static void |
opencv_core.mixChannels(opencv_core.Mat src,
long nsrcs,
opencv_core.Mat dst,
long ndsts,
IntBuffer fromTo,
long npairs) |
static void |
opencv_core.mixChannels(opencv_core.Mat src,
long nsrcs,
opencv_core.Mat dst,
long ndsts,
org.bytedeco.javacpp.IntPointer fromTo,
long npairs)
\brief Copies specified channels from input arrays to the specified channels of
output arrays.
|
static opencv_core.Moments |
opencv_imgproc.moments(opencv_core.Mat array) |
static opencv_core.Moments |
opencv_imgproc.moments(opencv_core.Mat array,
boolean binaryImage)
\addtogroup imgproc_shape
\{
|
static void |
opencv_imgproc.morphologyEx(opencv_core.Mat src,
opencv_core.Mat dst,
int op,
opencv_core.Mat kernel) |
static void |
opencv_imgproc.morphologyEx(opencv_core.Mat src,
opencv_core.Mat dst,
int op,
opencv_core.Mat kernel,
opencv_core.Point anchor,
int iterations,
int borderType,
opencv_core.Scalar borderValue)
\brief Performs advanced morphological transformations.
|
static void |
opencv_text.MSERsToERStats(opencv_core.Mat image,
opencv_core.PointVectorVector contours,
opencv_text.ERStatVectorVector regions)
\brief Converts MSER contours (vector&lt;Point&gt;) to ERStat regions.
|
opencv_core.MatExpr |
opencv_core.Mat.mul(opencv_core.Mat m) |
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.Mat m) |
opencv_core.MatExpr |
opencv_core.MatExpr.mul(opencv_core.Mat m) |
opencv_core.MatExpr |
opencv_core.Mat.mul(opencv_core.Mat m,
double scale)
\brief Performs an element-wise multiplication or division of the two matrices.
|
opencv_core.UMat |
opencv_core.UMat.mul(opencv_core.Mat m,
double scale)
per-element matrix multiplication by means of matrix expressions
|
opencv_core.MatExpr |
opencv_core.MatExpr.mul(opencv_core.Mat m,
double scale) |
static void |
opencv_cudaarithm.mulAndScaleSpectrums(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int flags,
float scale) |
static void |
opencv_cudaarithm.mulAndScaleSpectrums(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int flags,
float scale,
boolean conjB,
opencv_core.Stream stream)
\brief Performs a per-element multiplication of two Fourier spectrums and scales the result.
|
static void |
opencv_core.mulSpectrums(opencv_core.Mat a,
opencv_core.Mat b,
opencv_core.Mat c,
int flags) |
static void |
opencv_cudaarithm.mulSpectrums(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int flags) |
static void |
opencv_core.mulSpectrums(opencv_core.Mat a,
opencv_core.Mat b,
opencv_core.Mat c,
int flags,
boolean conjB)
\brief Performs the per-element multiplication of two Fourier spectrums.
|
static void |
opencv_cudaarithm.mulSpectrums(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int flags,
boolean conjB,
opencv_core.Stream stream)
\brief Performs a per-element multiplication of two Fourier spectrums.
|
static opencv_core.MatExpr |
opencv_core.multiply(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.multiply(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.multiply(opencv_core.MatExpr e,
opencv_core.Mat m) |
static opencv_core.MatExpr |
opencv_core.multiply(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.multiply(opencv_core.Mat m,
opencv_core.MatExpr e) |
static void |
opencv_core.multiply(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.multiply(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.multiply(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
double scale,
int dtype)
\brief Calculates the per-element scaled product of two arrays.
|
static void |
opencv_cudaarithm.multiply(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
double scale,
int dtype,
opencv_core.Stream stream)
\brief Computes a matrix-matrix or matrix-scalar per-element product.
|
static opencv_core.Mat |
opencv_core.multiplyPut(opencv_core.Mat a,
double b) |
static opencv_core.Mat |
opencv_core.multiplyPut(opencv_core.Mat a,
opencv_core.Mat b) |
static void |
opencv_core.mulTransposed(opencv_core.Mat src,
opencv_core.Mat dst,
boolean aTa) |
static void |
opencv_core.mulTransposed(opencv_core.Mat src,
opencv_core.Mat dst,
boolean aTa,
opencv_core.Mat delta,
double scale,
int dtype)
\brief Calculates the product of a matrix and its transposition.
|
void |
opencv_superres.FrameSource.nextFrame(opencv_core.Mat frame) |
void |
opencv_superres.SuperResolution.nextFrame(opencv_core.Mat frame)
\brief Process next frame from input and return output result.
|
static void |
opencv_ximgproc.niBlackThreshold(opencv_core.Mat _src,
opencv_core.Mat _dst,
double maxValue,
int type,
int blockSize,
double k) |
static void |
opencv_ximgproc.niBlackThreshold(opencv_core.Mat _src,
opencv_core.Mat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod)
\addtogroup ximgproc
\{
|
static void |
opencv_photo.nonLocalMeans(opencv_core.Mat src,
opencv_core.Mat dst,
float h) |
static void |
opencv_photo.nonLocalMeans(opencv_core.Mat src,
opencv_core.Mat dst,
float h,
int search_window,
int block_size,
int borderMode,
opencv_core.Stream stream)
\addtogroup photo_denoise
\{
|
static double |
opencv_core.norm(opencv_core.Mat src1) |
static double |
opencv_cudaarithm.norm(opencv_core.Mat src1,
int normType) |
static double |
opencv_core.norm(opencv_core.Mat src1,
int normType,
opencv_core.Mat mask)
\brief Calculates the absolute norm of an array.
|
static double |
opencv_cudaarithm.norm(opencv_core.Mat src1,
int normType,
opencv_core.Mat mask)
\} cudaarithm_core
|
static double |
opencv_core.norm(opencv_core.Mat src1,
opencv_core.Mat src2) |
static double |
opencv_cudaarithm.norm(opencv_core.Mat src1,
opencv_core.Mat src2) |
static double |
opencv_cudaarithm.norm(opencv_core.Mat src1,
opencv_core.Mat src2,
int normType)
\brief Returns the difference of two matrices.
|
static double |
opencv_core.norm(opencv_core.Mat src1,
opencv_core.Mat src2,
int normType,
opencv_core.Mat mask)
\brief Calculates an absolute difference norm or a relative difference norm.
|
static void |
opencv_core.normalize(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.normalize(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
double beta,
int norm_type,
int dtype) |
static void |
opencv_core.normalize(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
double beta,
int norm_type,
int dtype,
opencv_core.Mat mask)
\brief Normalizes the norm or value range of an array.
|
static void |
opencv_cudaarithm.normalize(opencv_core.Mat src,
opencv_core.Mat dst,
double alpha,
double beta,
int norm_type,
int dtype,
opencv_core.Mat mask,
opencv_core.Stream stream)
\brief Normalizes the norm or value range of an array.
|
static void |
opencv_stitching.normalizeUsingWeightMap(opencv_core.Mat weight,
opencv_core.Mat src) |
static opencv_core.MatExpr |
opencv_core.not(opencv_core.Mat m) |
static opencv_core.MatExpr |
opencv_core.notEquals(double s,
opencv_core.Mat a) |
static opencv_core.MatExpr |
opencv_core.notEquals(opencv_core.Mat a,
double s) |
static opencv_core.MatExpr |
opencv_core.notEquals(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.or(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.or(opencv_core.Mat a,
opencv_core.Scalar s) |
static opencv_core.MatExpr |
opencv_core.or(opencv_core.Scalar s,
opencv_core.Mat a) |
static opencv_core.Mat |
opencv_core.orPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.orPut(opencv_core.Mat a,
opencv_core.Scalar b) |
static void |
opencv_core.patchNaNs(opencv_core.Mat a) |
static void |
opencv_core.patchNaNs(opencv_core.Mat a,
double val)
\brief Converts NaNs to the given number
|
static void |
opencv_core.PCABackProject(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat result)
wrap PCA::backProject
|
static void |
opencv_core.PCACompute(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors) |
static void |
opencv_core.PCACompute(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
double retainedVariance)
wrap PCA::operator()
|
static void |
opencv_core.PCACompute(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
int maxComponents)
wrap PCA::operator()
|
static void |
opencv_core.PCACompute2(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat eigenvalues) |
static void |
opencv_core.PCACompute2(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat eigenvalues,
double retainedVariance)
wrap PCA::operator() and add eigenvalues output parameter
|
static void |
opencv_core.PCACompute2(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat eigenvalues,
int maxComponents)
wrap PCA::operator() and add eigenvalues output parameter
|
static void |
opencv_core.PCAProject(opencv_core.Mat data,
opencv_core.Mat mean,
opencv_core.Mat eigenvectors,
opencv_core.Mat result)
wrap PCA::project
|
static void |
opencv_photo.pencilSketch(opencv_core.Mat src,
opencv_core.Mat dst1,
opencv_core.Mat dst2) |
static void |
opencv_photo.pencilSketch(opencv_core.Mat src,
opencv_core.Mat dst1,
opencv_core.Mat dst2,
float sigma_s,
float sigma_r,
float shade_factor)
\brief Pencil-like non-photorealistic line drawing
|
static void |
opencv_core.perspectiveTransform(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat m)
\brief Performs the perspective matrix transformation of vectors.
|
static void |
opencv_core.phase(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat angle) |
static void |
opencv_cudaarithm.phase(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat angle) |
static void |
opencv_core.phase(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat angle,
boolean angleInDegrees)
\brief Calculates the rotation angle of 2D vectors.
|
static void |
opencv_cudaarithm.phase(opencv_core.Mat x,
opencv_core.Mat y,
opencv_core.Mat angle,
boolean angleInDegrees,
opencv_core.Stream stream)
\brief Computes polar angles of complex matrix elements.
|
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.Mat src1,
opencv_core.Mat src2) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window,
double[] response) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window,
DoubleBuffer response) |
static opencv_core.Point2d |
opencv_imgproc.phaseCorrelate(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat window,
org.bytedeco.javacpp.DoublePointer response)
\brief The function is used to detect translational shifts that occur between two images.
|
static void |
opencv_img_hash.pHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr)
\brief Computes pHash value of the input image
|
opencv_core.NAryMatIterator |
opencv_core.NAryMatIterator.planes(opencv_core.Mat planes) |
static double |
opencv_imgproc.pointPolygonTest(opencv_core.Mat contour,
opencv_core.Point2f pt,
boolean measureDist)
\brief Performs a point-in-contour test.
|
static void |
opencv_core.polarToCart(opencv_core.Mat magnitude,
opencv_core.Mat angle,
opencv_core.Mat x,
opencv_core.Mat y) |
static void |
opencv_cudaarithm.polarToCart(opencv_core.Mat magnitude,
opencv_core.Mat angle,
opencv_core.Mat x,
opencv_core.Mat y) |
static void |
opencv_core.polarToCart(opencv_core.Mat magnitude,
opencv_core.Mat angle,
opencv_core.Mat x,
opencv_core.Mat y,
boolean angleInDegrees)
\brief Calculates x and y coordinates of 2D vectors from their magnitude and angle.
|
static void |
opencv_cudaarithm.polarToCart(opencv_core.Mat magnitude,
opencv_core.Mat angle,
opencv_core.Mat x,
opencv_core.Mat y,
boolean angleInDegrees,
opencv_core.Stream stream)
\brief Converts polar coordinates into Cartesian.
|
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.GpuMatVector pts,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.GpuMatVector pts,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.MatVector pts,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.MatVector pts,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws several polygonal curves.
|
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.Point pts,
int[] npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.Point pts,
IntBuffer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.Point pts,
org.bytedeco.javacpp.IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.Point pts,
org.bytedeco.javacpp.IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.UMatVector pts,
boolean isClosed,
opencv_core.Scalar color) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
opencv_core.UMatVector pts,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(opencv_core.Mat img,
org.bytedeco.javacpp.PointerPointer pts,
org.bytedeco.javacpp.IntPointer npts,
int ncontours,
boolean isClosed,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\overload
|
static void |
opencv_core.pow(opencv_core.Mat src,
double power,
opencv_core.Mat dst)
\brief Raises every array element to a power.
|
static void |
opencv_cudaarithm.pow(opencv_core.Mat src,
double power,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.pow(opencv_core.Mat src,
double power,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Raises every matrix element to a power.
|
static void |
opencv_imgproc.preCornerDetect(opencv_core.Mat src,
opencv_core.Mat dst,
int ksize) |
static void |
opencv_imgproc.preCornerDetect(opencv_core.Mat src,
opencv_core.Mat dst,
int ksize,
int borderType)
\brief Calculates a feature map for corner detection.
|
void |
opencv_face.FaceRecognizer.predict_collect(opencv_core.Mat src,
opencv_face.PredictCollector collector)
\brief If implemented, sends all prediction results to a collector, which can be used for custom result handling.
|
int |
opencv_face.FaceRecognizer.predict_label(opencv_core.Mat src)
\overload
|
float |
opencv_ml.StatModel.predict(opencv_core.Mat samples) |
float |
opencv_ml.EM.predict(opencv_core.Mat samples) |
float |
opencv_ml.LogisticRegression.predict(opencv_core.Mat samples) |
opencv_core.Mat |
opencv_video.KalmanFilter.predict(opencv_core.Mat control)
\brief Computes a predicted state.
|
opencv_core.Mat |
opencv_tracking.UnscentedKalmanFilter.predict(opencv_core.Mat control)
The function performs the prediction step of the algorithm
|
void |
opencv_face.FaceRecognizer.predict(opencv_core.Mat src,
int[] label,
double[] confidence) |
void |
opencv_face.FaceRecognizer.predict(opencv_core.Mat src,
IntBuffer label,
DoubleBuffer confidence) |
void |
opencv_face.FaceRecognizer.predict(opencv_core.Mat src,
org.bytedeco.javacpp.IntPointer label,
org.bytedeco.javacpp.DoublePointer confidence)
\brief Predicts a label and associated confidence (e.g.
|
float |
opencv_ml.StatModel.predict(opencv_core.Mat samples,
opencv_core.Mat results,
int flags)
\brief Predicts response(s) for the provided sample(s)
|
float |
opencv_ml.EM.predict(opencv_core.Mat samples,
opencv_core.Mat results,
int flags)
\brief Returns posterior probabilities for the provided samples
|
float |
opencv_ml.LogisticRegression.predict(opencv_core.Mat samples,
opencv_core.Mat results,
int flags)
\brief Predicts responses for input samples and returns a float type.
|
opencv_core.Point2d |
opencv_ml.EM.predict2(opencv_core.Mat sample,
opencv_core.Mat probs)
\brief Returns a likelihood logarithm value and an index of the most probable mixture component
for the given sample.
|
static int |
opencv_core.predictOptimalVectorWidth(opencv_core.Mat src1) |
static int |
opencv_core.predictOptimalVectorWidth(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat src3,
opencv_core.Mat src4,
opencv_core.Mat src5,
opencv_core.Mat src6,
opencv_core.Mat src7,
opencv_core.Mat src8,
opencv_core.Mat src9,
int strat) |
static int |
opencv_core.predictOptimalVectorWidthMax(opencv_core.Mat src1) |
static int |
opencv_core.predictOptimalVectorWidthMax(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat src3,
opencv_core.Mat src4,
opencv_core.Mat src5,
opencv_core.Mat src6,
opencv_core.Mat src7,
opencv_core.Mat src8,
opencv_core.Mat src9) |
float |
opencv_ml.NormalBayesClassifier.predictProb(opencv_core.Mat inputs,
opencv_core.Mat outputs,
opencv_core.Mat outputProbs) |
float |
opencv_ml.NormalBayesClassifier.predictProb(opencv_core.Mat inputs,
opencv_core.Mat outputs,
opencv_core.Mat outputProbs,
int flags)
\brief Predicts the response for sample(s).
|
static int |
opencv_core.print(opencv_core.Mat mtx) |
static int |
opencv_core.print(opencv_core.Mat mtx,
org.bytedeco.javacpp.Pointer stream) |
void |
opencv_photo.MergeMertens.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst) |
void |
opencv_photo.CalibrateCRF.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeDebevec.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeRobertson.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeExposures.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeDebevec.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeRobertson.process(opencv_core.GpuMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.AlignExposures.process(opencv_core.GpuMatVector src,
opencv_core.MatVector dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.AlignMTB.process(opencv_core.GpuMatVector src,
opencv_core.MatVector dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_objdetect.DetectionBasedTracker.process(opencv_core.Mat imageGray) |
void |
opencv_photo.Tonemap.process(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Tonemaps image
|
void |
opencv_stitching.Timelapser.process(opencv_core.Mat img,
opencv_core.Mat mask,
opencv_core.Point tl) |
void |
opencv_photo.MergeMertens.process(opencv_core.MatVector src,
opencv_core.Mat dst)
\brief Short version of process, that doesn't take extra arguments.
|
void |
opencv_photo.CalibrateCRF.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times)
\brief Recovers inverse camera response.
|
void |
opencv_photo.MergeDebevec.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeRobertson.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeExposures.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response)
\brief Merges images.
|
void |
opencv_photo.MergeDebevec.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeRobertson.process(opencv_core.MatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.AlignExposures.process(opencv_core.MatVector src,
opencv_core.MatVector dst,
opencv_core.Mat times,
opencv_core.Mat response)
\brief Aligns images
|
void |
opencv_photo.AlignMTB.process(opencv_core.MatVector src,
opencv_core.MatVector dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_videostab.IOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat mask) |
void |
opencv_videostab.NullOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat mask) |
void |
opencv_videostab.TranslationBasedLocalOutlierRejector.process(opencv_core.Size frameSize,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat mask) |
void |
opencv_photo.MergeMertens.process(opencv_core.UMatVector src,
opencv_core.Mat dst) |
void |
opencv_photo.CalibrateCRF.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeDebevec.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeRobertson.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times) |
void |
opencv_photo.MergeExposures.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeDebevec.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeMertens.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.MergeRobertson.process(opencv_core.UMatVector src,
opencv_core.Mat dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.AlignExposures.process(opencv_core.UMatVector src,
opencv_core.MatVector dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_photo.AlignMTB.process(opencv_core.UMatVector src,
opencv_core.MatVector dst,
opencv_core.Mat times,
opencv_core.Mat response) |
void |
opencv_ximgproc.GraphSegmentation.processImage(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Segment an image and store output in dst
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.processNoiseCov(opencv_core.Mat processNoiseCov) |
opencv_tracking.UnscentedKalmanFilterParams |
opencv_tracking.UnscentedKalmanFilterParams.processNoiseCov(opencv_core.Mat processNoiseCov) |
opencv_core.Mat |
opencv_core.PCA.project(opencv_core.Mat vec)
\brief Projects vector(s) to the principal component subspace.
|
opencv_core.Mat |
opencv_core.LDA.project(opencv_core.Mat src)
Projects samples into the LDA subspace.
|
opencv_core.Mat |
opencv_face.FacemarkLBF.BBox.project(opencv_core.Mat shape) |
void |
opencv_core.PCA.project(opencv_core.Mat vec,
opencv_core.Mat result)
\overload
|
static void |
opencv_calib3d.projectPoints(opencv_core.GpuMat objectPoints,
opencv_core.GpuMat imagePoints,
opencv_core.Mat affine,
opencv_core.GpuMat K,
opencv_core.GpuMat D) |
static void |
opencv_calib3d.projectPoints(opencv_core.GpuMat objectPoints,
opencv_core.GpuMat imagePoints,
opencv_core.Mat affine,
opencv_core.GpuMat K,
opencv_core.GpuMat D,
double alpha,
opencv_core.GpuMat jacobian) |
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat affine,
opencv_core.Mat K,
opencv_core.Mat D) |
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat affine,
opencv_core.Mat K,
opencv_core.Mat D,
double alpha,
opencv_core.Mat jacobian)
\brief Projects points using fisheye model
|
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat imagePoints) |
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
opencv_core.Mat K,
opencv_core.Mat D,
double alpha,
opencv_core.Mat jacobian)
\overload
|
static void |
opencv_calib3d.projectPoints(opencv_core.Mat objectPoints,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat imagePoints,
opencv_core.Mat jacobian,
double aspectRatio)
\brief Projects 3D points to an image plane.
|
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.Mat affine,
opencv_core.UMat K,
opencv_core.UMat D) |
static void |
opencv_calib3d.projectPoints(opencv_core.UMat objectPoints,
opencv_core.UMat imagePoints,
opencv_core.Mat affine,
opencv_core.UMat K,
opencv_core.UMat D,
double alpha,
opencv_core.UMat jacobian) |
static double |
opencv_core.PSNR(opencv_core.Mat src1,
opencv_core.Mat src2)
\brief Computes the Peak Signal-to-Noise Ratio (PSNR) image quality metric.
|
opencv_core.MatVector |
opencv_core.MatVector.push_back(opencv_core.Mat value) |
void |
opencv_core.Mat.push_back(opencv_core.Mat m)
\overload
|
opencv_dnn.MatPointerVector |
opencv_dnn.MatPointerVector.push_back(opencv_core.Mat value) |
opencv_core.MatVector |
opencv_core.MatVector.put(long i,
opencv_core.Mat value) |
opencv_dnn.MatPointerVector |
opencv_dnn.MatPointerVector.put(long i,
opencv_core.Mat value) |
opencv_core.MatVector |
opencv_core.MatVector.put(opencv_core.Mat... array) |
opencv_dnn.MatPointerVector |
opencv_dnn.MatPointerVector.put(opencv_core.Mat... array) |
opencv_core.MatVector |
opencv_core.MatVector.put(opencv_core.Mat value) |
opencv_core.Mat |
opencv_core.Mat.put(opencv_core.Mat m)
\brief assignment operators
|
opencv_core.SparseMat |
opencv_core.SparseMat.put(opencv_core.Mat m)
equivalent to the corresponding constructor
|
opencv_dnn.MatPointerVector |
opencv_dnn.MatPointerVector.put(opencv_core.Mat value) |
opencv_core.MatBytePairVector |
opencv_core.MatBytePairVector.put(opencv_core.Mat[] firstValue,
byte[] secondValue) |
static void |
opencv_imgproc.putText(opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_imgproc.putText(opencv_core.Mat img,
org.bytedeco.javacpp.BytePointer text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin)
\brief Draws a text string.
|
static void |
opencv_imgproc.putText(opencv_core.Mat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color) |
static void |
opencv_imgproc.putText(opencv_core.Mat img,
String text,
opencv_core.Point org,
int fontFace,
double fontScale,
opencv_core.Scalar color,
int thickness,
int lineType,
boolean bottomLeftOrigin) |
static void |
opencv_cudawarping.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dstsize,
int borderType)
\brief Blurs an image and downsamples it.
|
static void |
opencv_cudawarping.pyrDown(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Smoothes an image and downsamples it.
|
static void |
opencv_imgproc.pyrMeanShiftFiltering(opencv_core.Mat src,
opencv_core.Mat dst,
double sp,
double sr) |
static void |
opencv_imgproc.pyrMeanShiftFiltering(opencv_core.Mat src,
opencv_core.Mat dst,
double sp,
double sr,
int maxLevel,
opencv_core.TermCriteria termcrit)
\brief Performs an initial step of meanshift segmentation of an image.
|
static void |
opencv_cudawarping.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_imgproc.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dstsize,
int borderType)
\brief Upsamples an image and then blurs it.
|
static void |
opencv_cudawarping.pyrUp(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Upsamples an image and then smoothes it.
|
opencv_face.FacemarkAAM.Model |
opencv_face.FacemarkAAM.Model.Q(opencv_core.Mat Q) |
opencv_face.FacemarkAAM.Config |
opencv_face.FacemarkAAM.Config.R(opencv_core.Mat R) |
opencv_stitching.CameraParams |
opencv_stitching.CameraParams.R(opencv_core.Mat R) |
static void |
opencv_img_hash.radialVarianceHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr) |
static void |
opencv_img_hash.radialVarianceHash(opencv_core.Mat inputArr,
opencv_core.Mat outputArr,
double sigma,
int numOfAngleLine)
\brief Computes radial variance hash of the input image
|
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.GpuMatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.MatVector masks,
boolean compactResult)
\overload
|
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.UMatVector masks,
boolean compactResult) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance) |
void |
opencv_features2d.DescriptorMatcher.radiusMatch(opencv_core.Mat queryDescriptors,
opencv_core.Mat trainDescriptors,
opencv_core.DMatchVectorVector matches,
float maxDistance,
opencv_core.Mat mask,
boolean compactResult)
\brief For each query descriptor, finds the training descriptors not farther than the specified distance.
|
int |
opencv_flann.Index.radiusSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
double radius,
int maxResults) |
int |
opencv_flann.Index.radiusSearch(opencv_core.Mat query,
opencv_core.Mat indices,
opencv_core.Mat dists,
double radius,
int maxResults,
opencv_flann.SearchParams params) |
static void |
opencv_ml.randMVNormal(opencv_core.Mat mean,
opencv_core.Mat cov,
int nsamples,
opencv_core.Mat samples)
\brief Generates _sample_ from multivariate normal distribution
|
static void |
opencv_core.randn(opencv_core.Mat dst,
opencv_core.Mat mean,
opencv_core.Mat stddev)
\brief Fills the array with normally distributed random numbers.
|
static void |
opencv_core.randShuffle(opencv_core.Mat dst) |
static void |
opencv_core.randShuffle(opencv_core.Mat dst,
double iterFactor,
opencv_core.RNG rng)
\brief Shuffles the array elements randomly.
|
static void |
opencv_core.randu(opencv_core.Mat dst,
opencv_core.Mat low,
opencv_core.Mat high)
\brief Generates a single uniformly-distributed random number or an array of random numbers.
|
static void |
opencv_core.read(opencv_core.FileNode node,
opencv_core.Mat mat) |
static void |
opencv_core.read(opencv_core.FileNode node,
opencv_core.Mat mat,
opencv_core.Mat default_mat) |
boolean |
opencv_videoio.VideoCapture.read(opencv_core.Mat image)
\brief Grabs, decodes and returns the next video frame.
|
static int |
opencv_ximgproc.readGT(org.bytedeco.javacpp.BytePointer src_path,
opencv_core.Mat dst)
\brief Function for reading ground truth disparity maps.
|
static int |
opencv_ximgproc.readGT(String src_path,
opencv_core.Mat dst) |
opencv_core.Mat |
opencv_core.LDA.reconstruct(opencv_core.Mat src)
Reconstructs projections from the LDA subspace.
|
static int |
opencv_calib3d.recoverPose(opencv_core.Mat E,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat R,
opencv_core.Mat t) |
static int |
opencv_calib3d.recoverPose(opencv_core.Mat E,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat R,
opencv_core.Mat t,
double focal,
opencv_core.Point2d pp,
opencv_core.Mat mask)
\overload
|
static int |
opencv_calib3d.recoverPose(opencv_core.Mat E,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix,
opencv_core.Mat R,
opencv_core.Mat t) |
static int |
opencv_calib3d.recoverPose(opencv_core.Mat E,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix,
opencv_core.Mat R,
opencv_core.Mat t,
double distanceThresh) |
static int |
opencv_calib3d.recoverPose(opencv_core.Mat E,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix,
opencv_core.Mat R,
opencv_core.Mat t,
double distanceThresh,
opencv_core.Mat mask,
opencv_core.Mat triangulatedPoints)
\overload
|
static int |
opencv_calib3d.recoverPose(opencv_core.Mat E,
opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat cameraMatrix,
opencv_core.Mat R,
opencv_core.Mat t,
opencv_core.Mat mask)
\brief Recover relative camera rotation and translation from an estimated essential matrix and the
corresponding points in two images, using cheirality check.
|
static void |
opencv_imgproc.rectangle(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color) |
static void |
opencv_imgproc.rectangle(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws a simple, thick, or filled up-right rectangle.
|
static void |
opencv_imgproc.rectangle(opencv_core.Mat img,
opencv_core.Rect rec,
opencv_core.Scalar color) |
static void |
opencv_imgproc.rectangle(opencv_core.Mat img,
opencv_core.Rect rec,
opencv_core.Scalar color,
int thickness,
int lineType,
int shift)
\overload
|
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.GpuMatVector imgpt1,
opencv_core.GpuMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.MatVector imgpt1,
opencv_core.MatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags)
computes the rectification transformations for 3-head camera, where all the heads are on the same line.
|
static float |
opencv_calib3d.rectify3Collinear(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Mat cameraMatrix3,
opencv_core.Mat distCoeffs3,
opencv_core.UMatVector imgpt1,
opencv_core.UMatVector imgpt3,
opencv_core.Size imageSize,
opencv_core.Mat R12,
opencv_core.Mat T12,
opencv_core.Mat R13,
opencv_core.Mat T13,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat R3,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat P3,
opencv_core.Mat Q,
double alpha,
opencv_core.Size newImgSize,
opencv_core.Rect roi1,
opencv_core.Rect roi2,
int flags) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.Mat src,
opencv_core.Mat sqr,
opencv_core.Mat dst,
opencv_core.Rect rect) |
static void |
opencv_cudaarithm.rectStdDev(opencv_core.Mat src,
opencv_core.Mat sqr,
opencv_core.Mat dst,
opencv_core.Rect rect,
opencv_core.Stream stream)
\brief Computes a standard deviation of integral images.
|
static void |
opencv_core.reduce(opencv_core.Mat src,
opencv_core.Mat dst,
int dim,
int rtype) |
static void |
opencv_cudaarithm.reduce(opencv_core.Mat mtx,
opencv_core.Mat vec,
int dim,
int reduceOp) |
static void |
opencv_core.reduce(opencv_core.Mat src,
opencv_core.Mat dst,
int dim,
int rtype,
int dtype)
\brief Reduces a matrix to a vector.
|
static void |
opencv_cudaarithm.reduce(opencv_core.Mat mtx,
opencv_core.Mat vec,
int dim,
int reduceOp,
int dtype,
opencv_core.Stream stream)
\brief Reduces a matrix to a vector.
|
static void |
opencv_aruco.refineDetectedMarkers(opencv_core.Mat image,
opencv_aruco.Board board,
opencv_core.GpuMatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.GpuMatVector rejectedCorners) |
static void |
opencv_aruco.refineDetectedMarkers(opencv_core.Mat image,
opencv_aruco.Board board,
opencv_core.GpuMatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.GpuMatVector rejectedCorners,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
float minRepDistance,
float errorCorrectionRate,
boolean checkAllOrders,
opencv_core.Mat recoveredIdxs,
opencv_aruco.DetectorParameters parameters) |
static void |
opencv_aruco.refineDetectedMarkers(opencv_core.Mat image,
opencv_aruco.Board board,
opencv_core.MatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.MatVector rejectedCorners) |
static void |
opencv_aruco.refineDetectedMarkers(opencv_core.Mat image,
opencv_aruco.Board board,
opencv_core.MatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.MatVector rejectedCorners,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
float minRepDistance,
float errorCorrectionRate,
boolean checkAllOrders,
opencv_core.Mat recoveredIdxs,
opencv_aruco.DetectorParameters parameters)
\brief Refinds markers that were not detected, based on the already detected markers and the board layout
|
static void |
opencv_aruco.refineDetectedMarkers(opencv_core.Mat image,
opencv_aruco.Board board,
opencv_core.UMatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.UMatVector rejectedCorners) |
static void |
opencv_aruco.refineDetectedMarkers(opencv_core.Mat image,
opencv_aruco.Board board,
opencv_core.UMatVector detectedCorners,
opencv_core.Mat detectedIds,
opencv_core.UMatVector rejectedCorners,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
float minRepDistance,
float errorCorrectionRate,
boolean checkAllOrders,
opencv_core.Mat recoveredIdxs,
opencv_aruco.DetectorParameters parameters) |
static void |
opencv_core.registerPageLocked(opencv_core.Mat m)
\brief Page-locks the memory of matrix and maps it for the device(s).
|
static void |
opencv_cudawarping.remap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat xmap,
opencv_core.Mat ymap,
int interpolation) |
static void |
opencv_imgproc.remap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat map1,
opencv_core.Mat map2,
int interpolation) |
static void |
opencv_imgproc.remap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat map1,
opencv_core.Mat map2,
int interpolation,
int borderMode,
opencv_core.Scalar borderValue)
\brief Applies a generic geometrical transformation to an image.
|
static void |
opencv_cudawarping.remap(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat xmap,
opencv_core.Mat ymap,
int interpolation,
int borderMode,
opencv_core.Scalar borderValue,
opencv_core.Stream stream)
\addtogroup cudawarping
\{
|
void |
opencv_plot.Plot2d.render(opencv_core.Mat _plotResult) |
static opencv_core.Mat |
opencv_core.repeat(opencv_core.Mat src,
int ny,
int nx)
\overload
|
static void |
opencv_core.repeat(opencv_core.Mat src,
int ny,
int nx,
opencv_core.Mat dst)
\brief Fills the output array with repeated copies of the input array.
|
opencv_core.Mat |
opencv_face.FacemarkLBF.BBox.reproject(opencv_core.Mat shape) |
static void |
opencv_calib3d.reprojectImageTo3D(opencv_core.Mat disparity,
opencv_core.Mat _3dImage,
opencv_core.Mat Q) |
static void |
opencv_calib3d.reprojectImageTo3D(opencv_core.Mat disparity,
opencv_core.Mat _3dImage,
opencv_core.Mat Q,
boolean handleMissingValues,
int ddepth)
\brief Reprojects a disparity image to 3D space.
|
static void |
opencv_cudawarping.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize) |
static void |
opencv_imgproc.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize) |
static void |
opencv_imgproc.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
double fx,
double fy,
int interpolation)
\brief Resizes an image.
|
static void |
opencv_cudawarping.resize(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
double fx,
double fy,
int interpolation,
opencv_core.Stream stream)
\brief Resizes an image.
|
boolean |
opencv_videoio.VideoCapture.retrieve(opencv_core.Mat image) |
boolean |
opencv_videoio.VideoCapture.retrieve(opencv_core.Mat image,
int flag)
\brief Decodes and returns the grabbed video frame.
|
static void |
opencv_calib3d.Rodrigues(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_calib3d.Rodrigues(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat jacobian)
\brief Converts a rotation matrix to a rotation vector or vice versa.
|
static void |
opencv_ximgproc.rollingGuidanceFilter(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_ximgproc.rollingGuidanceFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType)
\brief Applies the rolling guidance filter to an image.
|
static void |
opencv_core.rotate(opencv_core.Mat src,
opencv_core.Mat dst,
int rotateCode)
\brief Rotates a 2D array in multiples of 90 degrees.
|
static void |
opencv_cudawarping.rotate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
double angle) |
static void |
opencv_cudawarping.rotate(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
double angle,
double xShift,
double yShift,
int interpolation,
opencv_core.Stream stream)
\brief Rotates an image around the origin (0,0) and then shifts it.
|
static int |
opencv_imgproc.rotatedRectangleIntersection(opencv_core.RotatedRect rect1,
opencv_core.RotatedRect rect2,
opencv_core.Mat intersectingRegion)
\brief Finds out if there is any intersection between two rotated rectangles.
|
static opencv_core.Point3d |
opencv_calib3d.RQDecomp3x3(opencv_core.Mat src,
opencv_core.Mat mtxR,
opencv_core.Mat mtxQ) |
static opencv_core.Point3d |
opencv_calib3d.RQDecomp3x3(opencv_core.Mat src,
opencv_core.Mat mtxR,
opencv_core.Mat mtxQ,
opencv_core.Mat Qx,
opencv_core.Mat Qy,
opencv_core.Mat Qz)
\brief Computes an RQ decomposition of 3x3 matrices.
|
static void |
opencv_cudaarithm.rshift(opencv_core.Mat src,
opencv_core.Scalar4i val,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.rshift(opencv_core.Mat src,
opencv_core.Scalar4i val,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Performs pixel by pixel right shift of an image by a constant value.
|
void |
opencv_bioinspired.Retina.run(opencv_core.Mat inputImage)
\brief Method which allows retina to be applied on an input image,
|
void |
opencv_bioinspired.TransientAreasSegmentationModule.run(opencv_core.Mat inputToSegment) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level)
\brief Recognize text using the tesseract-ocr API.
|
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level)
\brief Recognize text using HMM.
|
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level)
\brief Recognize text using Beam Search.
|
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_bioinspired.TransientAreasSegmentationModule.run(opencv_core.Mat inputToSegment,
int channelIndex)
\brief Main processing method; get the result using the method getSegmentationPicture()
|
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
int min_confidence) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
int min_confidence) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
int min_confidence) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
int min_confidence,
int component_level) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
int min_confidence,
int component_level) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
int min_confidence,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level)
\brief Recognize text using HMM.
|
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
org.bytedeco.javacpp.BytePointer output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level)
\brief Recognize text using a segmentation based word-spotting/classifier cnn.
|
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
int min_confidence) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
int min_confidence) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
int min_confidence) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
int min_confidence,
int component_level) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
int min_confidence,
int component_level) |
org.bytedeco.javacpp.BytePointer |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
int min_confidence,
int component_level) |
void |
opencv_videostab.IDenseOptFlowEstimator.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat flowX,
opencv_core.Mat flowY,
opencv_core.Mat errors) |
void |
opencv_videostab.ISparseOptFlowEstimator.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat status,
opencv_core.Mat errors) |
void |
opencv_videostab.SparsePyrLkOptFlowEstimator.run(opencv_core.Mat frame0,
opencv_core.Mat frame1,
opencv_core.Mat points0,
opencv_core.Mat points1,
opencv_core.Mat status,
opencv_core.Mat errors) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
opencv_core.Mat mask,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.ERFilter.run(opencv_core.Mat image,
opencv_text.ERStatVector regions)
\brief The key method of ERFilter algorithm.
|
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
String output_text) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
String output_text) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
String output_text) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
String output_text) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
String output_text) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
float[] component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
FloatBuffer component_confidences,
int component_level) |
void |
opencv_text.BaseOCR.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRTesseract.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRHMMDecoder.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRBeamSearchDecoder.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
void |
opencv_text.OCRHolisticWordRecognizer.run(opencv_core.Mat image,
String output_text,
opencv_core.RectVector component_rects,
opencv_text.StdStringVector component_texts,
org.bytedeco.javacpp.FloatPointer component_confidences,
int component_level) |
static void |
opencv_features2d.KeyPointsFilter.runByPixelsMask(opencv_core.KeyPointVector keypoints,
opencv_core.Mat mask) |
opencv_face.FacemarkAAM.Model |
opencv_face.FacemarkAAM.Model.S(opencv_core.Mat S) |
void |
opencv_tracking.TrackerSampler.sampling(opencv_core.Mat image,
opencv_core.Rect boundingBox)
\brief Computes the regions starting from a position in an image
|
boolean |
opencv_tracking.TrackerSamplerAlgorithm.sampling(opencv_core.Mat image,
opencv_core.Rect boundingBox,
opencv_core.MatVector sample)
\brief Computes the regions starting from a position in an image.
|
boolean |
opencv_tracking.TrackerSamplerCS.samplingImpl(opencv_core.Mat image,
opencv_core.Rect boundingBox,
opencv_core.MatVector sample) |
static double |
opencv_calib3d.sampsonDistance(opencv_core.Mat pt1,
opencv_core.Mat pt2,
opencv_core.Mat F)
\brief Calculates the Sampson Distance between two points.
|
static void |
opencv_core.scaleAdd(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
opencv_core.Mat dst)
\brief Calculates the sum of a scaled array and another array.
|
static void |
opencv_cudaarithm.scaleAdd(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.scaleAdd(opencv_core.Mat src1,
double alpha,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Stream stream)
adds scaled array to another one (dst = alpha*src1 + src2)
|
static void |
opencv_imgproc.Scharr(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy) |
static void |
opencv_imgproc.Scharr(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy,
double scale,
double delta,
int borderType)
\brief Calculates the first x- or y- image derivative using Scharr operator.
|
static void |
opencv_photo.seamlessClone(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat mask,
opencv_core.Point p,
opencv_core.Mat blend,
int flags)
\brief Image editing tasks concern either global changes (color/intensity corrections, filters,
deformations) or local changes concerned to a selection.
|
static void |
opencv_optflow.segmentMotion(opencv_core.Mat mhi,
opencv_core.Mat segmask,
opencv_core.RectVector boundingRects,
double timestamp,
double segThresh)
\brief Splits a motion history image into a few parts corresponding to separate independent motions (for
example, left hand, right hand).
|
void |
opencv_tracking.TrackerFeature.selection(opencv_core.Mat response,
int npoints)
\brief Identify most effective features
|
void |
opencv_tracking.TrackerFeatureFeature2d.selection(opencv_core.Mat response,
int npoints) |
void |
opencv_tracking.TrackerFeatureHOG.selection(opencv_core.Mat response,
int npoints) |
void |
opencv_tracking.TrackerFeatureHAAR.selection(opencv_core.Mat response,
int npoints)
\brief Identify most effective features
|
void |
opencv_tracking.TrackerFeatureLBP.selection(opencv_core.Mat response,
int npoints) |
static opencv_core.Rect |
opencv_highgui.selectROI(org.bytedeco.javacpp.BytePointer windowName,
opencv_core.Mat img) |
static opencv_core.Rect |
opencv_highgui.selectROI(org.bytedeco.javacpp.BytePointer windowName,
opencv_core.Mat img,
boolean showCrosshair,
boolean fromCenter)
\brief Selects ROI on the given image.
|
static opencv_core.Rect |
opencv_highgui.selectROI(opencv_core.Mat img) |
static opencv_core.Rect |
opencv_highgui.selectROI(opencv_core.Mat img,
boolean showCrosshair,
boolean fromCenter)
\overload
|
static opencv_core.Rect |
opencv_highgui.selectROI(String windowName,
opencv_core.Mat img) |
static opencv_core.Rect |
opencv_highgui.selectROI(String windowName,
opencv_core.Mat img,
boolean showCrosshair,
boolean fromCenter) |
static void |
opencv_highgui.selectROIs(org.bytedeco.javacpp.BytePointer windowName,
opencv_core.Mat img,
opencv_core.RectVector boundingBoxes) |
static void |
opencv_highgui.selectROIs(org.bytedeco.javacpp.BytePointer windowName,
opencv_core.Mat img,
opencv_core.RectVector boundingBoxes,
boolean showCrosshair,
boolean fromCenter)
\brief Selects ROIs on the given image.
|
static void |
opencv_highgui.selectROIs(String windowName,
opencv_core.Mat img,
opencv_core.RectVector boundingBoxes) |
static void |
opencv_highgui.selectROIs(String windowName,
opencv_core.Mat img,
opencv_core.RectVector boundingBoxes,
boolean showCrosshair,
boolean fromCenter) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernelX,
opencv_core.Mat kernelY) |
static void |
opencv_imgproc.sepFilter2D(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
opencv_core.Mat kernelX,
opencv_core.Mat kernelY,
opencv_core.Point anchor,
double delta,
int borderType)
\brief Applies a separable linear filter to an image.
|
void |
opencv_ximgproc.SelectiveSearchSegmentation.setBaseImage(opencv_core.Mat img)
\brief Set an image used by switch* functions to initialize the class
|
void |
opencv_stitching.ProjectorBase.setCameraParams(opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
void |
opencv_ml.SVM.setClassWeights(opencv_core.Mat val)
\copybrief getClassWeights @see getClassWeights
|
void |
opencv_tracking.TrackerStateEstimatorMILBoosting.TrackerMILTargetState.setFeatures(opencv_core.Mat features)
\brief Set the features extracted from TrackerFeatureSet
|
static void |
opencv_core.setIdentity(opencv_core.Mat mtx) |
static void |
opencv_core.setIdentity(opencv_core.Mat mtx,
opencv_core.Scalar s)
\brief Initializes a scaled identity matrix.
|
void |
opencv_tracking.CvHaarEvaluator.setImage(opencv_core.Mat img) |
void |
opencv_tracking.CvFeatureEvaluator.setImage(opencv_core.Mat img,
byte clsLabel,
int idx) |
void |
opencv_tracking.CvHaarEvaluator.setImage(opencv_core.Mat img,
byte clsLabel,
int idx) |
void |
opencv_tracking.CvHOGEvaluator.setImage(opencv_core.Mat img,
byte clsLabel,
int idx) |
void |
opencv_tracking.CvLBPEvaluator.setImage(opencv_core.Mat img,
byte clsLabel,
int idx) |
void |
opencv_ximgproc.SelectiveSearchSegmentationStrategy.setImage(opencv_core.Mat img,
opencv_core.Mat regions,
opencv_core.Mat sizes) |
void |
opencv_ximgproc.SelectiveSearchSegmentationStrategy.setImage(opencv_core.Mat img,
opencv_core.Mat regions,
opencv_core.Mat sizes,
int image_id)
\brief Set an initial image, with a segmentation.
|
void |
opencv_shape.ShapeContextDistanceExtractor.setImages(opencv_core.Mat image1,
opencv_core.Mat image2)
\brief Set the images that correspond to each shape.
|
void |
opencv_tracking.TrackerCSRT.setInitialMask(opencv_core.Mat mask) |
void |
opencv_core.DownhillSolver.setInitStep(opencv_core.Mat step)
\brief Sets the initial step that will be used in downhill simplex algorithm.
|
void |
opencv_dnn.Net.setInput(opencv_core.Mat blob) |
void |
opencv_dnn.Net.setInput(opencv_core.Mat blob,
org.bytedeco.javacpp.BytePointer name,
double scalefactor,
opencv_core.Scalar mean)
\brief Sets the new input value for the network
|
void |
opencv_dnn.Net.setInput(opencv_core.Mat blob,
String name,
double scalefactor,
opencv_core.Scalar mean) |
void |
opencv_ml.ANN_MLP.setLayerSizes(opencv_core.Mat _layer_sizes)
Integer vector specifying the number of neurons in each layer including the input and output layers.
|
void |
opencv_dnn.Net.setParam(opencv_dnn.DictValue layer,
int numParam,
opencv_core.Mat blob)
\brief Sets the new value for the learned param of the layer.
|
void |
opencv_ml.DTrees.setPriors(opencv_core.Mat val)
\copybrief getPriors @see getPriors
|
void |
opencv_stitching.BundleAdjusterBase.setRefinementMask(opencv_core.Mat mask) |
void |
opencv_objdetect.HOGDescriptor.setSVMDetector(opencv_core.Mat _svmdetector)
\brief Sets coefficients for the linear SVM classifier.
|
void |
opencv_cudaobjdetect.HOG.setSVMDetector(opencv_core.Mat detector)
\brief Sets coefficients for the linear SVM classifier.
|
void |
opencv_tracking.TrackerStateEstimatorAdaBoosting.TrackerAdaBoostingTargetState.setTargetResponses(opencv_core.Mat responses)
\brief Set the features extracted from TrackerFeatureSet
|
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat templ) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat edges,
opencv_core.Mat dx,
opencv_core.Mat dy,
opencv_core.Point templCenter) |
void |
opencv_imgproc.GeneralizedHough.setTemplate(opencv_core.Mat templ,
opencv_core.Point templCenter)
set template to search
|
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value) |
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.Mat value) |
opencv_core.Mat |
opencv_core.Mat.setTo(opencv_core.Mat value,
opencv_core.Mat mask)
\brief Sets all or some of the array elements to the specified value.
|
opencv_core.UMat |
opencv_core.UMat.setTo(opencv_core.Mat value,
opencv_core.Mat mask)
sets some of the matrix elements to s, according to the mask
|
opencv_core.GpuMat |
opencv_core.GpuMat.setTo(opencv_core.Scalar s,
opencv_core.Mat mask)
sets some of the GpuMat elements to s, according to the mask (Blocking call)
|
opencv_core.GpuMat |
opencv_core.GpuMat.setTo(opencv_core.Scalar s,
opencv_core.Mat mask,
opencv_core.Stream stream)
sets some of the GpuMat elements to s, according to the mask (Non-Blocking call)
|
void |
opencv_features2d.BOWImgDescriptorExtractor.setVocabulary(opencv_core.Mat vocabulary)
\brief Sets a visual vocabulary.
|
void |
opencv_dnn.LSTMLayer.setWeights(opencv_core.Mat Wh,
opencv_core.Mat Wx,
opencv_core.Mat b)
Deprecated.
Use LayerParams::blobs instead.
\brief Set trained weights for LSTM layer.
LSTM behavior on each step is defined by current input, previous output, previous cell state and learned weights. Let \f$x_t\f$ be current input, \f$h_t\f$ be current output, \f$c_t\f$ be current state. Than current output and current cell state is computed as follows: \f{eqnarray*}{ h_t &= o_t \odot tanh(c_t), \\ c_t &= f_t \odot c_{t-1} + i_t \odot g_t, \\ \f} where \f$\odot\f$ is per-element multiply operation and \f$i_t, f_t, o_t, g_t\f$ is internal gates that are computed using learned wights. Gates are computed as follows: \f{eqnarray*}{ i_t &= sigmoid&(W_{xi} x_t + W_{hi} h_{t-1} + b_i), \\ f_t &= sigmoid&(W_{xf} x_t + W_{hf} h_{t-1} + b_f), \\ o_t &= sigmoid&(W_{xo} x_t + W_{ho} h_{t-1} + b_o), \\ g_t &= tanh &(W_{xg} x_t + W_{hg} h_{t-1} + b_g), \\ \f} where \f$W_{x?}\f$, \f$W_{h?}\f$ and \f$b_{?}\f$ are learned weights represented as matrices: \f$W_{x?} \in R^{N_h \times N_x}\f$, \f$W_{h?} \in R^{N_h \times N_h}\f$, \f$b_? \in R^{N_h}\f$. For simplicity and performance purposes we use \f$ W_x = [W_{xi}; W_{xf}; W_{xo}, W_{xg}] \f$ (i.e. \f$W_x\f$ is vertical concatenation of \f$ W_{x?} \f$), \f$ W_x \in R^{4N_h \times N_x} \f$. The same for \f$ W_h = [W_{hi}; W_{hf}; W_{ho}, W_{hg}], W_h \in R^{4N_h \times N_h} \f$ and for \f$ b = [b_i; b_f, b_o, b_g]\f$, \f$b \in R^{4N_h} \f$. |
void |
opencv_dnn.RNNLayer.setWeights(opencv_core.Mat Wxh,
opencv_core.Mat bh,
opencv_core.Mat Whh,
opencv_core.Mat Who,
opencv_core.Mat bo)
Setups learned weights.
|
static org.bytedeco.javacpp.IntPointer |
opencv_dnn.shape(opencv_core.Mat mat) |
static org.bytedeco.javacpp.BytePointer |
opencv_core.shiftLeft(org.bytedeco.javacpp.BytePointer out,
opencv_core.Mat mtx) |
opencv_videoio.VideoWriter |
opencv_videoio.VideoWriter.shiftLeft(opencv_core.Mat image)
\brief Stream operator to write the next video frame.
|
static String |
opencv_core.shiftLeft(String out,
opencv_core.Mat mtx) |
void |
opencv_photo.AlignMTB.shiftMat(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Point shift)
\brief Helper function that shifts Mat, filling new regions with zeros.
|
opencv_videoio.VideoCapture |
opencv_videoio.VideoCapture.shiftRight(opencv_core.Mat image)
\brief Stream operator to read the next video frame.
|
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0,
opencv_dnn._Range r1) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0,
opencv_dnn._Range r1,
opencv_dnn._Range r2) |
static opencv_core.Mat |
opencv_dnn.slice(opencv_core.Mat m,
opencv_dnn._Range r0,
opencv_dnn._Range r1,
opencv_dnn._Range r2,
opencv_dnn._Range r3) |
static void |
opencv_imgproc.Sobel(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy) |
static void |
opencv_imgproc.Sobel(opencv_core.Mat src,
opencv_core.Mat dst,
int ddepth,
int dx,
int dy,
int ksize,
double scale,
double delta,
int borderType)
\brief Calculates the first, second, third, or mixed image derivatives using an extended Sobel operator.
|
static boolean |
opencv_core.solve(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static boolean |
opencv_core.solve(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
int flags)
\brief Solves one or more linear systems or least-squares problems.
|
static int |
opencv_core.solveCubic(opencv_core.Mat coeffs,
opencv_core.Mat roots)
\brief Finds the real roots of a cubic equation.
|
static int |
opencv_core.solveLP(opencv_core.Mat Func,
opencv_core.Mat Constr,
opencv_core.Mat z)
\brief Solve given (non-integer) linear programming problem using the Simplex Algorithm (Simplex Method).
|
static int |
opencv_calib3d.solveP3P(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.GpuMatVector rvecs,
opencv_core.GpuMatVector tvecs,
int flags) |
static int |
opencv_calib3d.solveP3P(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.MatVector rvecs,
opencv_core.MatVector tvecs,
int flags)
\brief Finds an object pose from 3 3D-2D point correspondences.
|
static int |
opencv_calib3d.solveP3P(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.UMatVector rvecs,
opencv_core.UMatVector tvecs,
int flags) |
static boolean |
opencv_calib3d.solvePnP(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static boolean |
opencv_calib3d.solvePnP(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess,
int flags)
\brief Finds an object pose from 3D-2D point correspondences.
|
static boolean |
opencv_calib3d.solvePnPRansac(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec) |
static boolean |
opencv_calib3d.solvePnPRansac(opencv_core.Mat objectPoints,
opencv_core.Mat imagePoints,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat rvec,
opencv_core.Mat tvec,
boolean useExtrinsicGuess,
int iterationsCount,
float reprojectionError,
double confidence,
opencv_core.Mat inliers,
int flags)
\brief Finds an object pose from 3D-2D point correspondences using the RANSAC scheme.
|
static double |
opencv_core.solvePoly(opencv_core.Mat coeffs,
opencv_core.Mat roots) |
static double |
opencv_core.solvePoly(opencv_core.Mat coeffs,
opencv_core.Mat roots,
int maxIters)
\brief Finds the real or complex roots of a polynomial equation.
|
static void |
opencv_core.SVD.solveZ(opencv_core.Mat src,
opencv_core.Mat dst)
\brief solves an under-determined singular linear system
|
static void |
opencv_core.sort(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
\brief Sorts each row or each column of a matrix.
|
static void |
opencv_core.sortIdx(opencv_core.Mat src,
opencv_core.Mat dst,
int flags)
\brief Sorts each row or each column of a matrix.
|
static void |
opencv_imgproc.spatialGradient(opencv_core.Mat src,
opencv_core.Mat dx,
opencv_core.Mat dy) |
static void |
opencv_imgproc.spatialGradient(opencv_core.Mat src,
opencv_core.Mat dx,
opencv_core.Mat dy,
int ksize,
int borderType)
\brief Calculates the first order image derivative in both x and y using a Sobel operator
|
static void |
opencv_cudaarithm.split(opencv_core.Mat src,
opencv_core.GpuMat dst) |
static void |
opencv_cudaarithm.split(opencv_core.Mat src,
opencv_core.GpuMat dst,
opencv_core.Stream stream)
\brief Copies each plane of a multi-channel matrix into an array.
|
static void |
opencv_core.split(opencv_core.Mat m,
opencv_core.GpuMatVector mv) |
static void |
opencv_cudaarithm.split(opencv_core.Mat src,
opencv_core.GpuMatVector dst) |
static void |
opencv_cudaarithm.split(opencv_core.Mat src,
opencv_core.GpuMatVector dst,
opencv_core.Stream stream)
\overload
|
static void |
opencv_core.split(opencv_core.Mat src,
opencv_core.Mat mvbegin)
\brief Divides a multi-channel array into several single-channel arrays.
|
static void |
opencv_core.split(opencv_core.Mat m,
opencv_core.MatVector mv)
\overload
|
static void |
opencv_core.split(opencv_core.Mat m,
opencv_core.UMatVector mv) |
static void |
opencv_cudaarithm.sqr(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.sqr(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes a square value of each matrix element.
|
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.Mat _src,
opencv_core.Mat _dst,
int ddepth,
opencv_core.Size ksize) |
static void |
opencv_imgproc.sqrBoxFilter(opencv_core.Mat _src,
opencv_core.Mat _dst,
int ddepth,
opencv_core.Size ksize,
opencv_core.Point anchor,
boolean normalize,
int borderType)
\brief Calculates the normalized sum of squares of the pixel values overlapping the filter.
|
static void |
opencv_cudaarithm.sqrIntegral(opencv_core.Mat src,
opencv_core.Mat sqsum) |
static void |
opencv_cudaarithm.sqrIntegral(opencv_core.Mat src,
opencv_core.Mat sqsum,
opencv_core.Stream stream)
\brief Computes a squared integral image.
|
static opencv_core.Scalar |
opencv_cudaarithm.sqrSum(opencv_core.Mat src) |
static opencv_core.Scalar |
opencv_cudaarithm.sqrSum(opencv_core.Mat src,
opencv_core.Mat mask)
\brief Returns the squared sum of matrix elements.
|
static void |
opencv_core.sqrt(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Calculates a square root of array elements.
|
static void |
opencv_cudaarithm.sqrt(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.sqrt(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Computes a square root of each matrix element.
|
void |
opencv_videostab.IMotionStabilizer.stabilize(int size,
opencv_core.MatVector motions,
opencv_core.IntIntPair range,
opencv_core.Mat stabilizationMotions)
assumes that [0, size-1) is within or equal to [range.first, range.second)
|
void |
opencv_videostab.MotionStabilizationPipeline.stabilize(int size,
opencv_core.MatVector motions,
opencv_core.IntIntPair range,
opencv_core.Mat stabilizationMotions) |
void |
opencv_videostab.MotionFilterBase.stabilize(int size,
opencv_core.MatVector motions,
opencv_core.IntIntPair range,
opencv_core.Mat stabilizationMotions) |
void |
opencv_videostab.LpMotionStabilizer.stabilize(int size,
opencv_core.MatVector motions,
opencv_core.IntIntPair range,
opencv_core.Mat stabilizationMotions) |
void |
opencv_tracking.UkfSystemModel.stateConversionFunction(opencv_core.Mat x_k,
opencv_core.Mat u_k,
opencv_core.Mat v_k,
opencv_core.Mat x_kplus1)
The function for computing the next state from the previous state
|
opencv_tracking.UnscentedKalmanFilterParams |
opencv_tracking.UnscentedKalmanFilterParams.stateInit(opencv_core.Mat stateInit) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.statePost(opencv_core.Mat statePost) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.statePre(opencv_core.Mat statePre) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints1,
opencv_core.GpuMatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints1,
opencv_core.GpuMatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints1,
opencv_core.GpuMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints1,
opencv_core.GpuMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
int flags,
opencv_core.TermCriteria criteria)
\brief Performs stereo calibration
|
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
int flags,
opencv_core.TermCriteria criteria)
\overload
|
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F) |
static double |
opencv_calib3d.stereoCalibrate(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrateExtended(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints1,
opencv_core.GpuMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.stereoCalibrateExtended(opencv_core.GpuMatVector objectPoints,
opencv_core.GpuMatVector imagePoints1,
opencv_core.GpuMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static double |
opencv_calib3d.stereoCalibrateExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.stereoCalibrateExtended(opencv_core.MatVector objectPoints,
opencv_core.MatVector imagePoints1,
opencv_core.MatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria)
\brief Calibrates the stereo camera.
|
static double |
opencv_calib3d.stereoCalibrateExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.Mat perViewErrors) |
static double |
opencv_calib3d.stereoCalibrateExtended(opencv_core.UMatVector objectPoints,
opencv_core.UMatVector imagePoints1,
opencv_core.UMatVector imagePoints2,
opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat E,
opencv_core.Mat F,
opencv_core.Mat perViewErrors,
int flags,
opencv_core.TermCriteria criteria) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat tvec,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags) |
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat cameraMatrix1,
opencv_core.Mat distCoeffs1,
opencv_core.Mat cameraMatrix2,
opencv_core.Mat distCoeffs2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat T,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags,
double alpha,
opencv_core.Size newImageSize,
opencv_core.Rect validPixROI1,
opencv_core.Rect validPixROI2)
\brief Computes rectification transforms for each head of a calibrated stereo camera.
|
static void |
opencv_calib3d.stereoRectify(opencv_core.Mat K1,
opencv_core.Mat D1,
opencv_core.Mat K2,
opencv_core.Mat D2,
opencv_core.Size imageSize,
opencv_core.Mat R,
opencv_core.Mat tvec,
opencv_core.Mat R1,
opencv_core.Mat R2,
opencv_core.Mat P1,
opencv_core.Mat P2,
opencv_core.Mat Q,
int flags,
opencv_core.Size newImageSize,
double balance,
double fov_scale)
\brief Stereo rectification for fisheye camera model
|
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat F,
opencv_core.Size imgSize,
opencv_core.Mat H1,
opencv_core.Mat H2) |
static boolean |
opencv_calib3d.stereoRectifyUncalibrated(opencv_core.Mat points1,
opencv_core.Mat points2,
opencv_core.Mat F,
opencv_core.Size imgSize,
opencv_core.Mat H1,
opencv_core.Mat H2,
double threshold)
\brief Computes a rectification transform for an uncalibrated stereo camera.
|
int |
opencv_stitching.Stitcher.stitch(opencv_core.GpuMatVector images,
opencv_core.Mat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.GpuMatVector images,
opencv_core.RectVectorVector rois,
opencv_core.Mat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.MatVector images,
opencv_core.Mat pano)
\overload
|
int |
opencv_stitching.Stitcher.stitch(opencv_core.MatVector images,
opencv_core.RectVectorVector rois,
opencv_core.Mat pano)
\brief These functions try to stitch the given images.
|
int |
opencv_stitching.Stitcher.stitch(opencv_core.UMatVector images,
opencv_core.Mat pano) |
int |
opencv_stitching.Stitcher.stitch(opencv_core.UMatVector images,
opencv_core.RectVectorVector rois,
opencv_core.Mat pano) |
static void |
opencv_photo.stylization(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_photo.stylization(opencv_core.Mat src,
opencv_core.Mat dst,
float sigma_s,
float sigma_r)
\brief Stylization aims to produce digital imagery with a wide variety of effects not focused on
photorealism.
|
static opencv_core.Mat |
opencv_core.LDA.subspaceProject(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static opencv_core.Mat |
opencv_core.LDA.subspaceReconstruct(opencv_core.Mat W,
opencv_core.Mat mean,
opencv_core.Mat src) |
static opencv_core.MatExpr |
opencv_core.subtract(opencv_core.Mat m) |
static opencv_core.MatExpr |
opencv_core.subtract(opencv_core.MatExpr e,
opencv_core.Mat m) |
static opencv_core.MatExpr |
opencv_core.subtract(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.subtract(opencv_core.Mat m,
opencv_core.MatExpr e) |
static void |
opencv_core.subtract(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.subtract(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst) |
static void |
opencv_core.subtract(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
int dtype)
\brief Calculates the per-element difference between two arrays or array and a scalar.
|
static void |
opencv_cudaarithm.subtract(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst,
opencv_core.Mat mask,
int dtype,
opencv_core.Stream stream)
\brief Computes a matrix-matrix or matrix-scalar difference.
|
static opencv_core.MatExpr |
opencv_core.subtract(opencv_core.Mat a,
opencv_core.Scalar s) |
static opencv_core.MatExpr |
opencv_core.subtract(opencv_core.Scalar s,
opencv_core.Mat a) |
static opencv_core.Mat |
opencv_core.subtractPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.subtractPut(opencv_core.Mat a,
opencv_core.Scalar b) |
static opencv_core.Scalar |
opencv_cudaarithm.sum(opencv_core.Mat src) |
static opencv_core.Scalar |
opencv_cudaarithm.sum(opencv_core.Mat src,
opencv_core.Mat mask)
\brief Returns the sum of matrix elements.
|
static opencv_core.Scalar |
opencv_core.sumElems(opencv_core.Mat src)
\brief Calculates the sum of array elements.
|
void |
opencv_videostab.WobbleSuppressorBase.suppress(int idx,
opencv_core.Mat frame,
opencv_core.Mat result) |
void |
opencv_videostab.NullWobbleSuppressor.suppress(int idx,
opencv_core.Mat frame,
opencv_core.Mat result) |
void |
opencv_videostab.MoreAccurateMotionWobbleSuppressor.suppress(int idx,
opencv_core.Mat frame,
opencv_core.Mat result) |
static void |
opencv_core.SVBackSubst(opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
opencv_core.Mat rhs,
opencv_core.Mat dst)
wrap SVD::backSubst
|
static void |
opencv_core.SVDecomp(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt) |
static void |
opencv_core.SVDecomp(opencv_core.Mat src,
opencv_core.Mat w,
opencv_core.Mat u,
opencv_core.Mat vt,
int flags)
wrap SVD::compute
|
static void |
opencv_core.swap(opencv_core.Mat a,
opencv_core.Mat b)
\brief Swaps two matrices
|
static void |
opencv_cudaimgproc.swapChannels(opencv_core.Mat image,
int[] dstOrder) |
static void |
opencv_cudaimgproc.swapChannels(opencv_core.Mat image,
int[] dstOrder,
opencv_core.Stream stream) |
static void |
opencv_cudaimgproc.swapChannels(opencv_core.Mat image,
IntBuffer dstOrder) |
static void |
opencv_cudaimgproc.swapChannels(opencv_core.Mat image,
IntBuffer dstOrder,
opencv_core.Stream stream) |
static void |
opencv_cudaimgproc.swapChannels(opencv_core.Mat image,
org.bytedeco.javacpp.IntPointer dstOrder) |
static void |
opencv_cudaimgproc.swapChannels(opencv_core.Mat image,
org.bytedeco.javacpp.IntPointer dstOrder,
opencv_core.Stream stream)
\brief Exchanges the color channels of an image in-place.
|
opencv_stitching.CameraParams |
opencv_stitching.CameraParams.t(opencv_core.Mat t) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp1(opencv_core.Mat temp1) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp2(opencv_core.Mat temp2) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp3(opencv_core.Mat temp3) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp4(opencv_core.Mat temp4) |
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.temp5(opencv_core.Mat temp5) |
static void |
opencv_photo.textureFlattening(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst) |
static void |
opencv_photo.textureFlattening(opencv_core.Mat src,
opencv_core.Mat mask,
opencv_core.Mat dst,
float low_threshold,
float high_threshold,
int kernel_size)
\brief By retaining only the gradients at edge locations, before integrating with the Poisson solver, one
washes out the texture of the selected region, giving its contents a flat aspect.
|
static void |
opencv_ximgproc.thinning(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_ximgproc.thinning(opencv_core.Mat src,
opencv_core.Mat dst,
int thinningType)
\brief Applies a binary blob thinning operation, to achieve a skeletization of the input image.
|
static double |
opencv_cudaarithm.threshold(opencv_core.Mat src,
opencv_core.Mat dst,
double thresh,
double maxval,
int type) |
static double |
opencv_imgproc.threshold(opencv_core.Mat src,
opencv_core.Mat dst,
double thresh,
double maxval,
int type)
\} imgproc_motion
|
static double |
opencv_cudaarithm.threshold(opencv_core.Mat src,
opencv_core.Mat dst,
double thresh,
double maxval,
int type,
opencv_core.Stream stream)
\brief Applies a fixed-level threshold to each array element.
|
static opencv_core.Scalar |
opencv_core.trace(opencv_core.Mat mtx)
\brief Returns the trace of a matrix.
|
void |
opencv_face.FaceRecognizer.train(opencv_core.GpuMatVector src,
opencv_core.Mat labels) |
boolean |
opencv_ml.StatModel.train(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses)
\brief Trains the statistical model
|
void |
opencv_face.FaceRecognizer.train(opencv_core.MatVector src,
opencv_core.Mat labels)
\brief Trains a FaceRecognizer with given data and associated labels.
|
void |
opencv_face.FaceRecognizer.train(opencv_core.UMatVector src,
opencv_core.Mat labels) |
boolean |
opencv_ml.SVM.trainAuto(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses) |
boolean |
opencv_ml.SVM.trainAuto(opencv_core.Mat samples,
int layout,
opencv_core.Mat responses,
int kFold,
opencv_ml.ParamGrid Cgrid,
opencv_ml.ParamGrid gammaGrid,
opencv_ml.ParamGrid pGrid,
opencv_ml.ParamGrid nuGrid,
opencv_ml.ParamGrid coeffGrid,
opencv_ml.ParamGrid degreeGrid,
boolean balanced)
\brief Trains an %SVM with optimal parameters
|
void |
opencv_tracking.BaseClassifier.trainClassifier(opencv_core.Mat image,
int target,
float importance,
boolean[] errorMask) |
void |
opencv_tracking.BaseClassifier.trainClassifier(opencv_core.Mat image,
int target,
float importance,
org.bytedeco.javacpp.BoolPointer errorMask) |
boolean |
opencv_ml.EM.trainE(opencv_core.Mat samples,
opencv_core.Mat means0) |
boolean |
opencv_ml.EM.trainE(opencv_core.Mat samples,
opencv_core.Mat means0,
opencv_core.Mat covs0,
opencv_core.Mat weights0,
opencv_core.Mat logLikelihoods,
opencv_core.Mat labels,
opencv_core.Mat probs)
\brief Estimate the Gaussian mixture parameters from a samples set.
|
boolean |
opencv_ml.EM.trainEM(opencv_core.Mat samples) |
boolean |
opencv_ml.EM.trainEM(opencv_core.Mat samples,
opencv_core.Mat logLikelihoods,
opencv_core.Mat labels,
opencv_core.Mat probs)
\brief Estimate the Gaussian mixture parameters from a samples set.
|
boolean |
opencv_ml.EM.trainM(opencv_core.Mat samples,
opencv_core.Mat probs0) |
boolean |
opencv_ml.EM.trainM(opencv_core.Mat samples,
opencv_core.Mat probs0,
opencv_core.Mat logLikelihoods,
opencv_core.Mat labels,
opencv_core.Mat probs)
\brief Estimate the Gaussian mixture parameters from a samples set.
|
void |
opencv_cudaarithm.LookUpTable.transform(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
opencv_core.transform(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat m)
\brief Performs the matrix transformation of every array element.
|
void |
opencv_cudaarithm.LookUpTable.transform(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Transforms the source matrix into the destination matrix using the given look-up table:
dst(I) = lut(src(I)) .
|
opencv_video.KalmanFilter |
opencv_video.KalmanFilter.transitionMatrix(opencv_core.Mat transitionMatrix) |
static void |
opencv_core.transpose(opencv_core.Mat src,
opencv_core.Mat dst)
\brief Transposes a matrix.
|
static void |
opencv_cudaarithm.transpose(opencv_core.Mat src1,
opencv_core.Mat dst) |
static void |
opencv_cudaarithm.transpose(opencv_core.Mat src1,
opencv_core.Mat dst,
opencv_core.Stream stream)
\brief Transposes a matrix.
|
static void |
opencv_calib3d.triangulatePoints(opencv_core.Mat projMatr1,
opencv_core.Mat projMatr2,
opencv_core.Mat projPoints1,
opencv_core.Mat projPoints2,
opencv_core.Mat points4D)
\brief Reconstructs points by triangulation.
|
opencv_core.SVD |
opencv_core.SVD.u(opencv_core.Mat u) |
static void |
opencv_imgproc.undistort(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static void |
opencv_imgproc.undistort(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat newCameraMatrix)
\} imgproc_filter
|
static void |
opencv_calib3d.undistortImage(opencv_core.Mat distorted,
opencv_core.Mat undistorted,
opencv_core.Mat K,
opencv_core.Mat D) |
static void |
opencv_calib3d.undistortImage(opencv_core.Mat distorted,
opencv_core.Mat undistorted,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Mat Knew,
opencv_core.Size new_size)
\brief Transforms an image to compensate for fisheye lens distortion.
|
static void |
opencv_calib3d.undistortPoints(opencv_core.Mat distorted,
opencv_core.Mat undistorted,
opencv_core.Mat K,
opencv_core.Mat D) |
static void |
opencv_imgproc.undistortPoints(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs) |
static void |
opencv_calib3d.undistortPoints(opencv_core.Mat distorted,
opencv_core.Mat undistorted,
opencv_core.Mat K,
opencv_core.Mat D,
opencv_core.Mat R,
opencv_core.Mat P)
\brief Undistorts 2D points using fisheye model
|
static void |
opencv_imgproc.undistortPoints(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat R,
opencv_core.Mat P)
\brief Computes the ideal point coordinates from the observed point coordinates.
|
static void |
opencv_imgproc.undistortPointsIter(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat cameraMatrix,
opencv_core.Mat distCoeffs,
opencv_core.Mat R,
opencv_core.Mat P,
opencv_core.TermCriteria criteria)
\overload
\note Default version of #undistortPoints does 5 iterations to compute undistorted points.
|
static void |
opencv_core.unregisterPageLocked(opencv_core.Mat m)
\brief Unmaps the memory of matrix and makes it pageable again.
|
void |
opencv_structured_light.SinusoidalPattern.unwrapPhaseMap(opencv_core.GpuMatVector wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Size camSize) |
void |
opencv_structured_light.SinusoidalPattern.unwrapPhaseMap(opencv_core.GpuMatVector wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Size camSize,
opencv_core.Mat shadowMask) |
void |
opencv_phase_unwrapping.PhaseUnwrapping.unwrapPhaseMap(opencv_core.Mat wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap) |
void |
opencv_phase_unwrapping.PhaseUnwrapping.unwrapPhaseMap(opencv_core.Mat wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Mat shadowMask)
\brief Unwraps a 2D phase map.
|
void |
opencv_structured_light.SinusoidalPattern.unwrapPhaseMap(opencv_core.MatVector wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Size camSize) |
void |
opencv_structured_light.SinusoidalPattern.unwrapPhaseMap(opencv_core.MatVector wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Size camSize,
opencv_core.Mat shadowMask)
\brief Unwrap the wrapped phase map to remove phase ambiguities.
|
void |
opencv_structured_light.SinusoidalPattern.unwrapPhaseMap(opencv_core.UMatVector wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Size camSize) |
void |
opencv_structured_light.SinusoidalPattern.unwrapPhaseMap(opencv_core.UMatVector wrappedPhaseMap,
opencv_core.Mat unwrappedPhaseMap,
opencv_core.Size camSize,
opencv_core.Mat shadowMask) |
boolean |
opencv_tracking.MultiTrackerTLD.update_opt(opencv_core.Mat image)
\brief Update all trackers from the tracking-list, find a new most likely bounding boxes for the targets by
optimized update method using some techniques to speedup calculations specifically for MO TLD.
|
void |
opencv_face.FaceRecognizer.update(opencv_core.GpuMatVector src,
opencv_core.Mat labels) |
boolean |
opencv_tracking.MultiTracker.update(opencv_core.Mat image)
\brief Update the current tracking status.
|
boolean |
opencv_tracking.MultiTracker_Alt.update(opencv_core.Mat image)
\brief Update all trackers from the tracking-list, find a new most likely bounding boxes for the targets
|
boolean |
opencv_tracking.StrongClassifierDirectSelection.update(opencv_core.Mat image,
int target) |
boolean |
opencv_tracking.StrongClassifierDirectSelection.update(opencv_core.Mat image,
int target,
float importance) |
void |
opencv_tracking.ClfMilBoost.update(opencv_core.Mat posx,
opencv_core.Mat negx) |
void |
opencv_tracking.ClfOnlineStump.update(opencv_core.Mat posx,
opencv_core.Mat negx) |
boolean |
opencv_tracking.Tracker.update(opencv_core.Mat image,
opencv_core.Rect2d boundingBox)
\brief Update the tracker, find the new most likely bounding box for the target
|
boolean |
opencv_tracking.MultiTracker.update(opencv_core.Mat image,
opencv_core.Rect2dVector boundingBox)
\brief Update the current tracking status.
|
void |
opencv_face.FaceRecognizer.update(opencv_core.MatVector src,
opencv_core.Mat labels)
\brief Updates a FaceRecognizer with given data and associated labels.
|
void |
opencv_face.FaceRecognizer.update(opencv_core.UMatVector src,
opencv_core.Mat labels) |
static void |
opencv_optflow.updateMotionHistory(opencv_core.Mat silhouette,
opencv_core.Mat mhi,
double timestamp,
double duration)
\addtogroup optflow
\{
|
void |
opencv_core.GpuMat.upload(opencv_core.Mat arr)
\brief Performs data upload to GpuMat (Blocking call)
|
void |
opencv_core.GpuMat.upload(opencv_core.Mat arr,
opencv_core.Stream stream)
\brief Performs data upload to GpuMat (Non-Blocking call)
|
static void |
opencv_calib3d.validateDisparity(opencv_core.Mat disparity,
opencv_core.Mat cost,
int minDisparity,
int numberOfDisparities) |
static void |
opencv_calib3d.validateDisparity(opencv_core.Mat disparity,
opencv_core.Mat cost,
int minDisparity,
int numberOfDisparities,
int disp12MaxDisp)
validates disparity using the left-right check.
|
static void |
opencv_core.vconcat(opencv_core.GpuMatVector src,
opencv_core.Mat dst) |
static void |
opencv_core.vconcat(opencv_core.Mat src,
long nsrc,
opencv_core.GpuMat dst) |
static void |
opencv_core.vconcat(opencv_core.Mat src,
long nsrc,
opencv_core.Mat dst)
\brief Applies vertical concatenation to given matrices.
|
static void |
opencv_core.vconcat(opencv_core.Mat src,
long nsrc,
opencv_core.UMat dst) |
static void |
opencv_core.vconcat(opencv_core.Mat src1,
opencv_core.Mat src2,
opencv_core.Mat dst)
\overload
|
static void |
opencv_core.vconcat(opencv_core.MatVector src,
opencv_core.Mat dst)
\overload
|
static void |
opencv_core.vconcat(opencv_core.UMatVector src,
opencv_core.Mat dst) |
opencv_core.SVD |
opencv_core.SVD.vt(opencv_core.Mat vt) |
opencv_core.SVD |
opencv_core.SVD.w(opencv_core.Mat w) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.GpuMat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.GpuMat dst) |
opencv_core.Point |
opencv_stitching.RotationWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst)
\brief Projects the image.
|
opencv_core.Point |
opencv_stitching.DetailPlaneWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.AffineWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailSphericalWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailCylindricalWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarper.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
opencv_core.Point |
opencv_stitching.DetailPlaneWarperGpu.warp(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T,
int interp_mode,
int border_mode,
opencv_core.Mat dst) |
static void |
opencv_cudawarping.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
\brief Applies an affine transformation to an image.
|
static void |
opencv_cudawarping.warpAffine(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue,
opencv_core.Stream stream)
\brief Applies an affine transformation to an image.
|
void |
opencv_stitching.RotationWarper.warpBackward(opencv_core.Mat src,
opencv_core.Mat K,
opencv_core.Mat R,
int interp_mode,
int border_mode,
opencv_core.Size dst_size,
opencv_core.Mat dst)
\brief Projects the image backward.
|
void |
opencv_shape.ShapeTransformer.warpImage(opencv_core.Mat transformingImage,
opencv_core.Mat output) |
void |
opencv_shape.ShapeTransformer.warpImage(opencv_core.Mat transformingImage,
opencv_core.Mat output,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
\brief Apply a transformation, given a pre-estimated transformation parameters, to an Image.
|
static void |
opencv_cudawarping.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize) |
static void |
opencv_imgproc.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue)
\brief Applies a perspective transformation to an image.
|
static void |
opencv_cudawarping.warpPerspective(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Mat M,
opencv_core.Size dsize,
int flags,
int borderMode,
opencv_core.Scalar borderValue,
opencv_core.Stream stream)
\brief Applies a perspective transformation to an image.
|
opencv_core.Point2f |
opencv_stitching.RotationWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.Mat K,
opencv_core.Mat R)
\brief Projects the image point.
|
opencv_core.Point2f |
opencv_stitching.DetailPlaneWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Point2f |
opencv_stitching.AffineWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Point2f |
opencv_stitching.DetailPlaneWarper.warpPoint(opencv_core.Point2f pt,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
static void |
opencv_imgproc.warpPolar(opencv_core.Mat src,
opencv_core.Mat dst,
opencv_core.Size dsize,
opencv_core.Point2f center,
double maxRadius,
int flags)
\brief Remaps an image to polar or semilog-polar coordinates space
|
opencv_core.Rect |
opencv_stitching.RotationWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.AffineWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R) |
opencv_core.Rect |
opencv_stitching.DetailPlaneWarper.warpRoi(opencv_core.Size src_size,
opencv_core.Mat K,
opencv_core.Mat R,
opencv_core.Mat T) |
static void |
opencv_imgproc.watershed(opencv_core.Mat image,
opencv_core.Mat markers)
\brief Performs a marker-based image segmentation using the watershed algorithm.
|
void |
opencv_core.FileStorage.write(org.bytedeco.javacpp.BytePointer name,
opencv_core.Mat val)
\overload
|
static void |
opencv_core.write(opencv_core.FileStorage fs,
org.bytedeco.javacpp.BytePointer name,
opencv_core.Mat value) |
static void |
opencv_core.write(opencv_core.FileStorage fs,
String name,
opencv_core.Mat value) |
void |
opencv_videoio.VideoWriter.write(opencv_core.Mat image)
\brief Writes the next video frame
|
void |
opencv_core.FileStorage.write(String name,
opencv_core.Mat val) |
void |
opencv_tracking.CvFeatureEvaluator.writeFeatures(opencv_core.FileStorage fs,
opencv_core.Mat featureMap) |
void |
opencv_tracking.CvHaarEvaluator.writeFeatures(opencv_core.FileStorage fs,
opencv_core.Mat featureMap) |
void |
opencv_tracking.CvHOGEvaluator.writeFeatures(opencv_core.FileStorage fs,
opencv_core.Mat featureMap) |
void |
opencv_tracking.CvLBPEvaluator.writeFeatures(opencv_core.FileStorage fs,
opencv_core.Mat featureMap) |
static boolean |
opencv_optflow.writeOpticalFlow(org.bytedeco.javacpp.BytePointer path,
opencv_core.Mat flow)
\brief Write a .flo to disk
|
static boolean |
opencv_optflow.writeOpticalFlow(String path,
opencv_core.Mat flow) |
static opencv_core.MatExpr |
opencv_core.xor(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.MatExpr |
opencv_core.xor(opencv_core.Mat a,
opencv_core.Scalar s) |
static opencv_core.MatExpr |
opencv_core.xor(opencv_core.Scalar s,
opencv_core.Mat a) |
static opencv_core.Mat |
opencv_core.xorPut(opencv_core.Mat a,
opencv_core.Mat b) |
static opencv_core.Mat |
opencv_core.xorPut(opencv_core.Mat a,
opencv_core.Scalar b) |
| Constructor and Description |
|---|
Config(opencv_core.Mat rot,
opencv_core.Point2f trans,
float scaling,
int scale_id) |
CvMat(opencv_core.Mat m)
Deprecated.
|
CvMatND(opencv_core.Mat m)
Deprecated.
|
Dictionary(opencv_core.Mat _bytesList,
int _markerSize,
int _maxcorr) |
GpuMat(opencv_core.Mat arr) |
GpuMat(opencv_core.Mat arr,
opencv_core.GpuMat.Allocator allocator)
builds GpuMat from host memory (Blocking call)
|
HostMem(opencv_core.Mat arr) |
HostMem(opencv_core.Mat arr,
int alloc_type)
creates from host memory with copying data
|
Index(opencv_core.Mat features,
opencv_flann.IndexParams params) |
Index(opencv_core.Mat features,
opencv_flann.IndexParams params,
int distType) |
IplImage(opencv_core.Mat m) |
LDA(opencv_core.GpuMatVector src,
opencv_core.Mat labels) |
LDA(opencv_core.GpuMatVector src,
opencv_core.Mat labels,
int num_components) |
LDA(opencv_core.MatVector src,
opencv_core.Mat labels) |
LDA(opencv_core.MatVector src,
opencv_core.Mat labels,
int num_components)
Initializes and performs a Discriminant Analysis with Fisher's
Optimization Criterion on given data in src and corresponding labels
in labels.
|
LDA(opencv_core.UMatVector src,
opencv_core.Mat labels) |
LDA(opencv_core.UMatVector src,
opencv_core.Mat labels,
int num_components) |
LineIterator(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2) |
LineIterator(opencv_core.Mat img,
opencv_core.Point pt1,
opencv_core.Point pt2,
int connectivity,
boolean leftToRight)
\brief initializes the iterator
|
Mat(opencv_core.Mat m)
\overload
|
Mat(opencv_core.Mat m,
opencv_core.Range rowRange) |
Mat(opencv_core.Mat m,
opencv_core.Range rowRange,
opencv_core.Range colRange)
\overload
|
Mat(opencv_core.Mat m,
opencv_core.Rect roi)
\overload
|
MatBytePairVector(opencv_core.Mat[] firstValue,
byte[] secondValue) |
MatConstIterator(opencv_core.Mat _m)
constructor that sets the iterator to the beginning of the matrix
|
MatConstIterator(opencv_core.Mat _m,
int _row) |
MatConstIterator(opencv_core.Mat _m,
int _row,
int _col)
constructor that sets the iterator to the specified element of the matrix
|
MatConstIterator(opencv_core.Mat _m,
opencv_core.Point _pt)
constructor that sets the iterator to the specified element of the matrix
|
MatExpr(opencv_core.Mat m) |
MatExpr(opencv_core.MatOp _op,
int _flags,
opencv_core.Mat _a,
opencv_core.Mat _b,
opencv_core.Mat _c,
double _alpha,
double _beta,
opencv_core.Scalar _s) |
MatPointerVector(opencv_core.Mat... array) |
MatPointerVector(opencv_core.Mat value) |
MatVector(opencv_core.Mat... array) |
MatVector(opencv_core.Mat value) |
NAryMatIterator(opencv_core.Mat arrays,
byte[] ptrs) |
NAryMatIterator(opencv_core.Mat arrays,
byte[] ptrs,
int narrays) |
NAryMatIterator(opencv_core.Mat arrays,
ByteBuffer ptrs) |
NAryMatIterator(opencv_core.Mat arrays,
ByteBuffer ptrs,
int narrays) |
NAryMatIterator(opencv_core.Mat arrays,
org.bytedeco.javacpp.BytePointer ptrs) |
NAryMatIterator(opencv_core.Mat arrays,
org.bytedeco.javacpp.BytePointer ptrs,
int narrays) |
NAryMatIterator(opencv_core.Mat arrays,
opencv_core.Mat planes) |
NAryMatIterator(opencv_core.Mat arrays,
opencv_core.Mat planes,
int narrays) |
NAryMatIterator(org.bytedeco.javacpp.PointerPointer arrays,
opencv_core.Mat planes,
int narrays)
the full constructor taking arbitrary number of n-dim matrices
|
PCA(opencv_core.Mat data,
opencv_core.Mat mean,
int flags) |
PCA(opencv_core.Mat data,
opencv_core.Mat mean,
int flags,
double retainedVariance)
\overload
|
PCA(opencv_core.Mat data,
opencv_core.Mat mean,
int flags,
int maxComponents)
\overload
|
SparseMat(opencv_core.Mat m)
\overload
|
SVD(opencv_core.Mat src) |
SVD(opencv_core.Mat src,
int flags)
\overload
initializes an empty SVD structure and then calls SVD::operator()
|
SyntheticSequenceGenerator(opencv_core.Mat background,
opencv_core.Mat object,
double amplitude,
double wavelength,
double wavespeed,
double objspeed)
\brief Creates an instance of SyntheticSequenceGenerator.
|
TrackerAdaBoostingTargetState(opencv_core.Point2f position,
int width,
int height,
boolean foreground,
opencv_core.Mat responses)
\brief Constructor
|
TrackerMILTargetState(opencv_core.Point2f position,
int width,
int height,
boolean foreground,
opencv_core.Mat features)
\brief Constructor
|
TrackerSamplerPF(opencv_core.Mat chosenRect) |
TrackerSamplerPF(opencv_core.Mat chosenRect,
opencv_tracking.TrackerSamplerPF.Params parameters)
\brief Constructor
|
| Modifier and Type | Field and Description |
|---|---|
static opencv_core.Mat |
opencv_core.AbstractMat.EMPTY |
Copyright © 2018. All rights reserved.