public class opencv_ximgproc extends org.bytedeco.javacpp.presets.opencv_ximgproc
| Modifier and Type | Class and Description |
|---|---|
static class |
opencv_ximgproc.AdaptiveManifoldFilter
\brief Interface for Adaptive Manifold Filter realizations.
|
static class |
opencv_ximgproc.DisparityFilter
\addtogroup ximgproc_filters
\{
|
static class |
opencv_ximgproc.DisparityWLSFilter
\brief Disparity map filter based on Weighted Least Squares filter (in form of Fast Global Smoother that
is a lot faster than traditional Weighted Least Squares filter implementations) and optional use of
left-right-consistency-based confidence to refine the results in half-occlusions and uniform areas.
|
static class |
opencv_ximgproc.DTFilter
\brief Interface for realizations of Domain Transform filter.
|
static class |
opencv_ximgproc.EdgeAwareInterpolator
\brief Sparse match interpolation algorithm based on modified locally-weighted affine
estimator from \cite Revaud2015 and Fast Global Smoother as post-processing filter.
|
static class |
opencv_ximgproc.FastGlobalSmootherFilter
\brief Interface for implementations of Fast Global Smoother filter.
|
static class |
opencv_ximgproc.GraphSegmentation
\addtogroup ximgproc_segmentation
\{
|
static class |
opencv_ximgproc.GuidedFilter
\brief Interface for realizations of Guided Filter.
|
static class |
opencv_ximgproc.RFFeatureGetter
\addtogroup ximgproc_edge
\{
|
static class |
opencv_ximgproc.SelectiveSearchSegmentation
\brief Selective search segmentation algorithm
The class implements the algorithm described in \cite uijlings2013selective.
|
static class |
opencv_ximgproc.SelectiveSearchSegmentationStrategy
\brief Strategy for the selective search segmentation algorithm
The class implements a generic strategy for the algorithm described in \cite uijlings2013selective.
|
static class |
opencv_ximgproc.SelectiveSearchSegmentationStrategyColor
\brief Color-based strategy for the selective search segmentation algorithm
The class is implemented from the algorithm described in \cite uijlings2013selective.
|
static class |
opencv_ximgproc.SelectiveSearchSegmentationStrategyFill
\brief Fill-based strategy for the selective search segmentation algorithm
The class is implemented from the algorithm described in \cite uijlings2013selective.
|
static class |
opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple
\brief Regroup multiple strategies for the selective search segmentation algorithm
|
static class |
opencv_ximgproc.SelectiveSearchSegmentationStrategySize
\brief Size-based strategy for the selective search segmentation algorithm
The class is implemented from the algorithm described in \cite uijlings2013selective.
|
static class |
opencv_ximgproc.SelectiveSearchSegmentationStrategyTexture
\brief Texture-based strategy for the selective search segmentation algorithm
The class is implemented from the algorithm described in \cite uijlings2013selective.
|
static class |
opencv_ximgproc.SparseMatchInterpolator
\addtogroup ximgproc_filters
\{
|
static class |
opencv_ximgproc.StructuredEdgeDetection
\brief Class implementing edge detection algorithm from \cite Dollar2013 :
|
static class |
opencv_ximgproc.SuperpixelLSC
\addtogroup ximgproc_superpixel
\{
|
static class |
opencv_ximgproc.SuperpixelSEEDS
\addtogroup ximgproc_superpixel
\{
|
static class |
opencv_ximgproc.SuperpixelSLIC
\brief Class implementing the SLIC (Simple Linear Iterative Clustering) superpixels
algorithm described in \cite Achanta2012.
|
| Modifier and Type | Field and Description |
|---|---|
static int |
AM_FILTER
enum cv::ximgproc::EdgeAwareFiltersList
|
static int |
ARO_0_45
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_315_0
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_315_135
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_315_45
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_45_135
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_45_90
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_90_135
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_CTR_HOR
enum cv::ximgproc::AngleRangeOption
|
static int |
ARO_CTR_VER
enum cv::ximgproc::AngleRangeOption
|
static int |
BINARIZATION_NIBLACK
enum cv::ximgproc::LocalBinarizationMethods
|
static int |
BINARIZATION_NICK
enum cv::ximgproc::LocalBinarizationMethods
|
static int |
BINARIZATION_SAUVOLA
enum cv::ximgproc::LocalBinarizationMethods
|
static int |
BINARIZATION_WOLF
enum cv::ximgproc::LocalBinarizationMethods
|
static int |
DTF_IC
enum cv::ximgproc::EdgeAwareFiltersList
|
static int |
DTF_NC
enum cv::ximgproc::EdgeAwareFiltersList
|
static int |
DTF_RF
enum cv::ximgproc::EdgeAwareFiltersList
|
static int |
FHT_ADD
enum cv::ximgproc::HoughOp
|
static int |
FHT_AVE
enum cv::ximgproc::HoughOp
|
static int |
FHT_MAX
enum cv::ximgproc::HoughOp
|
static int |
FHT_MIN
enum cv::ximgproc::HoughOp
|
static int |
GUIDED_FILTER
enum cv::ximgproc::EdgeAwareFiltersList
|
static int |
HDO_DESKEW
enum cv::ximgproc::HoughDeskewOption
|
static int |
HDO_RAW
enum cv::ximgproc::HoughDeskewOption
|
static int |
MSLIC
enum cv::ximgproc::SLIC
|
static int |
RO_IGNORE_BORDERS
enum cv::ximgproc::RulesOption
|
static int |
RO_STRICT
enum cv::ximgproc::RulesOption
|
static int |
SLIC
enum cv::ximgproc::SLIC
|
static int |
SLICO
enum cv::ximgproc::SLIC
|
static int |
THINNING_GUOHALL
enum cv::ximgproc::ThinningTypes
|
static int |
THINNING_ZHANGSUEN
enum cv::ximgproc::ThinningTypes
|
| Constructor and Description |
|---|
opencv_ximgproc() |
| Modifier and Type | Method and Description |
|---|---|
static void |
amFilter(opencv_core.GpuMat joint,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double sigma_s,
double sigma_r) |
static void |
amFilter(opencv_core.GpuMat joint,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double sigma_s,
double sigma_r,
boolean adjust_outliers) |
static void |
amFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigma_s,
double sigma_r) |
static void |
amFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigma_s,
double sigma_r,
boolean adjust_outliers)
\brief Simple one-line Adaptive Manifold Filter call.
|
static void |
amFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigma_s,
double sigma_r) |
static void |
amFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigma_s,
double sigma_r,
boolean adjust_outliers) |
static void |
anisotropicDiffusion(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
float alpha,
float K,
int niters) |
static void |
anisotropicDiffusion(opencv_core.Mat src,
opencv_core.Mat dst,
float alpha,
float K,
int niters)
\brief Performs anisotropic diffusion on an image.
|
static void |
anisotropicDiffusion(opencv_core.UMat src,
opencv_core.UMat dst,
float alpha,
float K,
int niters) |
static void |
bilateralTextureFilter(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static void |
bilateralTextureFilter(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg) |
static void |
bilateralTextureFilter(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
bilateralTextureFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg)
\brief Applies the bilateral texture filter to an image.
|
static void |
bilateralTextureFilter(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
bilateralTextureFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg) |
static double |
computeBadPixelPercent(opencv_core.GpuMat GT,
opencv_core.GpuMat src,
opencv_core.Rect ROI) |
static double |
computeBadPixelPercent(opencv_core.GpuMat GT,
opencv_core.GpuMat src,
opencv_core.Rect ROI,
int thresh) |
static double |
computeBadPixelPercent(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI) |
static double |
computeBadPixelPercent(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI,
int thresh)
\brief Function for computing the percent of "bad" pixels in the disparity map
(pixels where error is higher than a specified threshold)
|
static double |
computeBadPixelPercent(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI) |
static double |
computeBadPixelPercent(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI,
int thresh) |
static double |
computeMSE(opencv_core.GpuMat GT,
opencv_core.GpuMat src,
opencv_core.Rect ROI) |
static double |
computeMSE(opencv_core.Mat GT,
opencv_core.Mat src,
opencv_core.Rect ROI)
\brief Function for computing mean square error for disparity maps
|
static double |
computeMSE(opencv_core.UMat GT,
opencv_core.UMat src,
opencv_core.Rect ROI) |
static void |
covarianceEstimation(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int windowRows,
int windowCols) |
static void |
covarianceEstimation(opencv_core.Mat src,
opencv_core.Mat dst,
int windowRows,
int windowCols)
\brief Computes the estimated covariance matrix of an image using the sliding
window formulation.
|
static void |
covarianceEstimation(opencv_core.UMat src,
opencv_core.UMat dst,
int windowRows,
int windowCols) |
static opencv_ximgproc.AdaptiveManifoldFilter |
createAMFilter(double sigma_s,
double sigma_r) |
static opencv_ximgproc.AdaptiveManifoldFilter |
createAMFilter(double sigma_s,
double sigma_r,
boolean adjust_outliers)
\brief Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.
|
static opencv_ximgproc.DisparityWLSFilter |
createDisparityWLSFilter(opencv_calib3d.StereoMatcher matcher_left)
\brief Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant
filter parameters automatically based on the matcher instance.
|
static opencv_ximgproc.DisparityWLSFilter |
createDisparityWLSFilterGeneric(boolean use_confidence)
\brief More generic factory method, create instance of DisparityWLSFilter and execute basic
initialization routines.
|
static opencv_ximgproc.DTFilter |
createDTFilter(opencv_core.GpuMat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
createDTFilter(opencv_core.GpuMat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static opencv_ximgproc.DTFilter |
createDTFilter(opencv_core.Mat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
createDTFilter(opencv_core.Mat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
\brief Factory method, create instance of DTFilter and produce initialization routines.
|
static opencv_ximgproc.DTFilter |
createDTFilter(opencv_core.UMat guide,
double sigmaSpatial,
double sigmaColor) |
static opencv_ximgproc.DTFilter |
createDTFilter(opencv_core.UMat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static opencv_ximgproc.EdgeAwareInterpolator |
createEdgeAwareInterpolator()
\brief Factory method that creates an instance of the
EdgeAwareInterpolator.
|
static opencv_ximgproc.FastGlobalSmootherFilter |
createFastGlobalSmootherFilter(opencv_core.GpuMat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
createFastGlobalSmootherFilter(opencv_core.GpuMat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static opencv_ximgproc.FastGlobalSmootherFilter |
createFastGlobalSmootherFilter(opencv_core.Mat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
createFastGlobalSmootherFilter(opencv_core.Mat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
\brief Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
|
static opencv_ximgproc.FastGlobalSmootherFilter |
createFastGlobalSmootherFilter(opencv_core.UMat guide,
double lambda,
double sigma_color) |
static opencv_ximgproc.FastGlobalSmootherFilter |
createFastGlobalSmootherFilter(opencv_core.UMat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static opencv_ximgproc.GraphSegmentation |
createGraphSegmentation() |
static opencv_ximgproc.GraphSegmentation |
createGraphSegmentation(double sigma,
float k,
int min_size)
\brief Creates a graph based segmentor
|
static opencv_ximgproc.GuidedFilter |
createGuidedFilter(opencv_core.GpuMat guide,
int radius,
double eps) |
static opencv_ximgproc.GuidedFilter |
createGuidedFilter(opencv_core.Mat guide,
int radius,
double eps)
\brief Factory method, create instance of GuidedFilter and produce initialization routines.
|
static opencv_ximgproc.GuidedFilter |
createGuidedFilter(opencv_core.UMat guide,
int radius,
double eps) |
static opencv_ximgproc.RFFeatureGetter |
createRFFeatureGetter() |
static opencv_calib3d.StereoMatcher |
createRightMatcher(opencv_calib3d.StereoMatcher matcher_left)
\brief Convenience method to set up the matcher for computing the right-view disparity map
that is required in case of filtering with confidence.
|
static opencv_ximgproc.SelectiveSearchSegmentation |
createSelectiveSearchSegmentation()
\brief Create a new SelectiveSearchSegmentation class.
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyColor |
createSelectiveSearchSegmentationStrategyColor()
\brief Create a new color-based strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyFill |
createSelectiveSearchSegmentationStrategyFill()
\brief Create a new fill-based strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
createSelectiveSearchSegmentationStrategyMultiple()
\brief Create a new multiple strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1)
\brief Create a new multiple strategy and set one substrategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2)
\brief Create a new multiple strategy and set two substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3)
\brief Create a new multiple strategy and set three substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple |
createSelectiveSearchSegmentationStrategyMultiple(opencv_ximgproc.SelectiveSearchSegmentationStrategy s1,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s2,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s3,
opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
\brief Create a new multiple strategy and set four substrategies, with equal weights
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategySize |
createSelectiveSearchSegmentationStrategySize()
\brief Create a new size-based strategy
|
static opencv_ximgproc.SelectiveSearchSegmentationStrategyTexture |
createSelectiveSearchSegmentationStrategyTexture()
\brief Create a new texture-based strategy
|
static opencv_ximgproc.StructuredEdgeDetection |
createStructuredEdgeDetection(org.bytedeco.javacpp.BytePointer model) |
static opencv_ximgproc.StructuredEdgeDetection |
createStructuredEdgeDetection(org.bytedeco.javacpp.BytePointer model,
opencv_ximgproc.RFFeatureGetter howToGetFeatures)
The only constructor
|
static opencv_ximgproc.StructuredEdgeDetection |
createStructuredEdgeDetection(String model) |
static opencv_ximgproc.StructuredEdgeDetection |
createStructuredEdgeDetection(String model,
opencv_ximgproc.RFFeatureGetter howToGetFeatures) |
static opencv_ximgproc.SuperpixelLSC |
createSuperpixelLSC(opencv_core.GpuMat image) |
static opencv_ximgproc.SuperpixelLSC |
createSuperpixelLSC(opencv_core.GpuMat image,
int region_size,
float ratio) |
static opencv_ximgproc.SuperpixelLSC |
createSuperpixelLSC(opencv_core.Mat image) |
static opencv_ximgproc.SuperpixelLSC |
createSuperpixelLSC(opencv_core.Mat image,
int region_size,
float ratio)
\brief Class implementing the LSC (Linear Spectral Clustering) superpixels
|
static opencv_ximgproc.SuperpixelLSC |
createSuperpixelLSC(opencv_core.UMat image) |
static opencv_ximgproc.SuperpixelLSC |
createSuperpixelLSC(opencv_core.UMat image,
int region_size,
float ratio) |
static opencv_ximgproc.SuperpixelSEEDS |
createSuperpixelSEEDS(int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels) |
static opencv_ximgproc.SuperpixelSEEDS |
createSuperpixelSEEDS(int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels,
int prior,
int histogram_bins,
boolean double_step)
\brief Initializes a SuperpixelSEEDS object.
|
static opencv_ximgproc.SuperpixelSLIC |
createSuperpixelSLIC(opencv_core.GpuMat image) |
static opencv_ximgproc.SuperpixelSLIC |
createSuperpixelSLIC(opencv_core.GpuMat image,
int algorithm,
int region_size,
float ruler) |
static opencv_ximgproc.SuperpixelSLIC |
createSuperpixelSLIC(opencv_core.Mat image) |
static opencv_ximgproc.SuperpixelSLIC |
createSuperpixelSLIC(opencv_core.Mat image,
int algorithm,
int region_size,
float ruler)
\brief Initialize a SuperpixelSLIC object
|
static opencv_ximgproc.SuperpixelSLIC |
createSuperpixelSLIC(opencv_core.UMat image) |
static opencv_ximgproc.SuperpixelSLIC |
createSuperpixelSLIC(opencv_core.UMat image,
int algorithm,
int region_size,
float ruler) |
static void |
dtFilter(opencv_core.GpuMat guide,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double sigmaSpatial,
double sigmaColor) |
static void |
dtFilter(opencv_core.GpuMat guide,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static void |
dtFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigmaSpatial,
double sigmaColor) |
static void |
dtFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
\brief Simple one-line Domain Transform filter call.
|
static void |
dtFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigmaSpatial,
double sigmaColor) |
static void |
dtFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters) |
static void |
fastGlobalSmootherFilter(opencv_core.GpuMat guide,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double lambda,
double sigma_color) |
static void |
fastGlobalSmootherFilter(opencv_core.GpuMat guide,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static void |
fastGlobalSmootherFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double lambda,
double sigma_color) |
static void |
fastGlobalSmootherFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
\brief Simple one-line Fast Global Smoother filter call.
|
static void |
fastGlobalSmootherFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double lambda,
double sigma_color) |
static void |
fastGlobalSmootherFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter) |
static void |
FastHoughTransform(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int dstMatDepth) |
static void |
FastHoughTransform(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew) |
static void |
FastHoughTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int dstMatDepth) |
static void |
FastHoughTransform(opencv_core.Mat src,
opencv_core.Mat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew)
\brief Calculates 2D Fast Hough transform of an image.
|
static void |
FastHoughTransform(opencv_core.UMat src,
opencv_core.UMat dst,
int dstMatDepth) |
static void |
FastHoughTransform(opencv_core.UMat src,
opencv_core.UMat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew) |
static void |
getDisparityVis(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static void |
getDisparityVis(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double scale) |
static void |
getDisparityVis(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
getDisparityVis(opencv_core.Mat src,
opencv_core.Mat dst,
double scale)
\brief Function for creating a disparity map visualization (clamped CV_8U image)
|
static void |
getDisparityVis(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
getDisparityVis(opencv_core.UMat src,
opencv_core.UMat dst,
double scale) |
static void |
guidedFilter(opencv_core.GpuMat guide,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int radius,
double eps) |
static void |
guidedFilter(opencv_core.GpuMat guide,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int radius,
double eps,
int dDepth) |
static void |
guidedFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
int radius,
double eps) |
static void |
guidedFilter(opencv_core.Mat guide,
opencv_core.Mat src,
opencv_core.Mat dst,
int radius,
double eps,
int dDepth)
\brief Simple one-line Guided Filter call.
|
static void |
guidedFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
int radius,
double eps) |
static void |
guidedFilter(opencv_core.UMat guide,
opencv_core.UMat src,
opencv_core.UMat dst,
int radius,
double eps,
int dDepth) |
static opencv_core.Scalar4i |
HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.GpuMat srcImgInfo) |
static opencv_core.Scalar4i |
HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.GpuMat srcImgInfo,
int angleRange,
int makeSkew,
int rules) |
static opencv_core.Scalar4i |
HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.Mat srcImgInfo) |
static opencv_core.Scalar4i |
HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.Mat srcImgInfo,
int angleRange,
int makeSkew,
int rules)
\brief Calculates coordinates of line segment corresponded by point in Hough space.
|
static opencv_core.Scalar4i |
HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.UMat srcImgInfo) |
static opencv_core.Scalar4i |
HoughPoint2Line(opencv_core.Point houghPoint,
opencv_core.UMat srcImgInfo,
int angleRange,
int makeSkew,
int rules) |
static void |
jointBilateralFilter(opencv_core.GpuMat joint,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
jointBilateralFilter(opencv_core.GpuMat joint,
opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType) |
static void |
jointBilateralFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
jointBilateralFilter(opencv_core.Mat joint,
opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
\brief Applies the joint bilateral filter to an image.
|
static void |
jointBilateralFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace) |
static void |
jointBilateralFilter(opencv_core.UMat joint,
opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType) |
static void |
l0Smooth(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static void |
l0Smooth(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
double lambda,
double kappa) |
static void |
l0Smooth(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
l0Smooth(opencv_core.Mat src,
opencv_core.Mat dst,
double lambda,
double kappa)
\brief Global image smoothing via L0 gradient minimization.
|
static void |
l0Smooth(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
l0Smooth(opencv_core.UMat src,
opencv_core.UMat dst,
double lambda,
double kappa) |
static void |
niBlackThreshold(opencv_core.GpuMat _src,
opencv_core.GpuMat _dst,
double maxValue,
int type,
int blockSize,
double k) |
static void |
niBlackThreshold(opencv_core.GpuMat _src,
opencv_core.GpuMat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod) |
static void |
niBlackThreshold(opencv_core.Mat _src,
opencv_core.Mat _dst,
double maxValue,
int type,
int blockSize,
double k) |
static void |
niBlackThreshold(opencv_core.Mat _src,
opencv_core.Mat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod)
\addtogroup ximgproc
\{
|
static void |
niBlackThreshold(opencv_core.UMat _src,
opencv_core.UMat _dst,
double maxValue,
int type,
int blockSize,
double k) |
static void |
niBlackThreshold(opencv_core.UMat _src,
opencv_core.UMat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod) |
static int |
readGT(org.bytedeco.javacpp.BytePointer src_path,
opencv_core.GpuMat dst) |
static int |
readGT(org.bytedeco.javacpp.BytePointer src_path,
opencv_core.Mat dst)
\brief Function for reading ground truth disparity maps.
|
static int |
readGT(org.bytedeco.javacpp.BytePointer src_path,
opencv_core.UMat dst) |
static int |
readGT(String src_path,
opencv_core.GpuMat dst) |
static int |
readGT(String src_path,
opencv_core.Mat dst) |
static int |
readGT(String src_path,
opencv_core.UMat dst) |
static void |
rollingGuidanceFilter(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static void |
rollingGuidanceFilter(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType) |
static void |
rollingGuidanceFilter(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
rollingGuidanceFilter(opencv_core.Mat src,
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType)
\brief Applies the rolling guidance filter to an image.
|
static void |
rollingGuidanceFilter(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
rollingGuidanceFilter(opencv_core.UMat src,
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType) |
static void |
thinning(opencv_core.GpuMat src,
opencv_core.GpuMat dst) |
static void |
thinning(opencv_core.GpuMat src,
opencv_core.GpuMat dst,
int thinningType) |
static void |
thinning(opencv_core.Mat src,
opencv_core.Mat dst) |
static void |
thinning(opencv_core.Mat src,
opencv_core.Mat dst,
int thinningType)
\brief Applies a binary blob thinning operation, to achieve a skeletization of the input image.
|
static void |
thinning(opencv_core.UMat src,
opencv_core.UMat dst) |
static void |
thinning(opencv_core.UMat src,
opencv_core.UMat dst,
int thinningType) |
public static final int THINNING_ZHANGSUEN
public static final int THINNING_GUOHALL
public static final int BINARIZATION_NIBLACK
public static final int BINARIZATION_SAUVOLA
public static final int BINARIZATION_WOLF
public static final int BINARIZATION_NICK
public static final int DTF_NC
public static final int DTF_IC
public static final int DTF_RF
public static final int GUIDED_FILTER
public static final int AM_FILTER
public static final int ARO_0_45
public static final int ARO_45_90
public static final int ARO_90_135
public static final int ARO_315_0
public static final int ARO_315_45
public static final int ARO_45_135
public static final int ARO_315_135
public static final int ARO_CTR_HOR
public static final int ARO_CTR_VER
public static final int FHT_MIN
public static final int FHT_MAX
public static final int FHT_ADD
public static final int FHT_AVE
public static final int HDO_RAW
public static final int HDO_DESKEW
public static final int RO_STRICT
public static final int RO_IGNORE_BORDERS
public static final int SLIC
public static final int SLICO
public static final int MSLIC
@Namespace(value="cv::ximgproc")
public static void niBlackThreshold(@ByVal
opencv_core.Mat _src,
@ByVal
opencv_core.Mat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod)
/** \brief Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.
The function transforms a grayscale image to a binary image according to the formulae: - **THRESH_BINARY** \f[dst(x,y) = \fork{\texttt{maxValue}}{if \(src(x,y) > T(x,y)\)}{0}{otherwise}\f] - **THRESH_BINARY_INV** \f[dst(x,y) = \fork{0}{if \(src(x,y) > T(x,y)\)}{\texttt{maxValue}}{otherwise}\f] where \f$T(x,y)\f$ is a threshold calculated individually for each pixel.
The threshold value \f$T(x, y)\f$ is determined based on the binarization method chosen. For classic Niblack, it is the mean minus \f$ k \f$ times standard deviation of \f$\texttt{blockSize} \times\texttt{blockSize}\f$ neighborhood of \f$(x, y)\f$.
The function can't process the image in-place.
_src - Source 8-bit single-channel image._dst - Destination image of the same size and the same type as src.maxValue - Non-zero value assigned to the pixels for which the condition is satisfied,
used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.type - Thresholding type, see cv::ThresholdTypes.blockSize - Size of a pixel neighborhood that is used to calculate a threshold value
for the pixel: 3, 5, 7, and so on.k - The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is
normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from
the mean.binarizationMethod - Binarization method to use. By default, Niblack's technique is used.
Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
\sa threshold, adaptiveThreshold
@Namespace(value="cv::ximgproc")
public static void niBlackThreshold(@ByVal
opencv_core.Mat _src,
@ByVal
opencv_core.Mat _dst,
double maxValue,
int type,
int blockSize,
double k)
@Namespace(value="cv::ximgproc")
public static void niBlackThreshold(@ByVal
opencv_core.UMat _src,
@ByVal
opencv_core.UMat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod)
@Namespace(value="cv::ximgproc")
public static void niBlackThreshold(@ByVal
opencv_core.UMat _src,
@ByVal
opencv_core.UMat _dst,
double maxValue,
int type,
int blockSize,
double k)
@Namespace(value="cv::ximgproc")
public static void niBlackThreshold(@ByVal
opencv_core.GpuMat _src,
@ByVal
opencv_core.GpuMat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod)
@Namespace(value="cv::ximgproc")
public static void niBlackThreshold(@ByVal
opencv_core.GpuMat _src,
@ByVal
opencv_core.GpuMat _dst,
double maxValue,
int type,
int blockSize,
double k)
@Namespace(value="cv::ximgproc")
public static void thinning(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int thinningType)
The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.
src - Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.dst - Destination image of the same size and the same type as src. The function can work in-place.thinningType - Value that defines which thinning algorithm should be used. See cv::ximgproc::ThinningTypes@Namespace(value="cv::ximgproc")
public static void thinning(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
@Namespace(value="cv::ximgproc")
public static void thinning(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int thinningType)
@Namespace(value="cv::ximgproc")
public static void thinning(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv::ximgproc")
public static void thinning(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int thinningType)
@Namespace(value="cv::ximgproc")
public static void thinning(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc")
public static void anisotropicDiffusion(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
float alpha,
float K,
int niters)
The function applies Perona-Malik anisotropic diffusion to an image. This is the solution to the partial differential equation:
\f[{\frac {\partial I}{\partial t}}={\mathrm {div}}\left(c(x,y,t)\nabla I\right)=\nabla c\cdot \nabla I+c(x,y,t)\Delta I\f]
Suggested functions for c(x,y,t) are:
\f[c\left(\|\nabla I\|\right)=e^{{-\left(\|\nabla I\|/K\right)^{2}}}\f]
or
\f[ c\left(\|\nabla I\|\right)={\frac {1}{1+\left({\frac {\|\nabla I\|}{K}}\right)^{2}}} \f]
src - Grayscale Source image.dst - Destination image of the same size and the same number of channels as src .alpha - The amount of time to step forward by on each iteration (normally, it's between 0 and 1).K - sensitivity to the edgesniters - The number of iterations@Namespace(value="cv::ximgproc")
public static void anisotropicDiffusion(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
float alpha,
float K,
int niters)
@Namespace(value="cv::ximgproc")
public static void anisotropicDiffusion(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
float alpha,
float K,
int niters)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DTFilter createDTFilter(@ByVal opencv_core.Mat guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
guide - guided image (used to build transformed distance, which describes edge structure of
guided image).
sigmaSpatial - \f${\sigma}_H\f$ parameter in the original article, it's similar to the sigma in the
coordinate space into bilateralFilter.
sigmaColor - \f${\sigma}_r\f$ parameter in the original article, it's similar to the sigma in the
color space into bilateralFilter.
mode - one of three modes DTF_NC, DTF_RF and DTF_IC which correspond to three modes for
filtering 2D signals in the article.
numIters - optional number of iterations used for filtering, 3 is quite enough.
For more details about Domain Transform filter parameters, see the original article \cite Gastal11 and [Domain Transform filter homepage](http://www.inf.ufrgs.br/~eslgastal/DomainTransform/).
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DTFilter createDTFilter(@ByVal opencv_core.Mat guide, double sigmaSpatial, double sigmaColor)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DTFilter createDTFilter(@ByVal opencv_core.UMat guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DTFilter createDTFilter(@ByVal opencv_core.UMat guide, double sigmaSpatial, double sigmaColor)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DTFilter createDTFilter(@ByVal opencv_core.GpuMat guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DTFilter createDTFilter(@ByVal opencv_core.GpuMat guide, double sigmaSpatial, double sigmaColor)
@Namespace(value="cv::ximgproc")
public static void dtFilter(@ByVal
opencv_core.Mat guide,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
guide - guided image (also called as joint image) with unsigned 8-bit or floating-point 32-bit
depth and up to 4 channels.src - filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.dst - destination image.sigmaSpatial - \f${\sigma}_H\f$ parameter in the original article, it's similar to the sigma in the
coordinate space into bilateralFilter.sigmaColor - \f${\sigma}_r\f$ parameter in the original article, it's similar to the sigma in the
color space into bilateralFilter.mode - one of three modes DTF_NC, DTF_RF and DTF_IC which correspond to three modes for
filtering 2D signals in the article.numIters - optional number of iterations used for filtering, 3 is quite enough.
\sa bilateralFilter, guidedFilter, amFilter@Namespace(value="cv::ximgproc")
public static void dtFilter(@ByVal
opencv_core.Mat guide,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double sigmaSpatial,
double sigmaColor)
@Namespace(value="cv::ximgproc")
public static void dtFilter(@ByVal
opencv_core.UMat guide,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
@Namespace(value="cv::ximgproc")
public static void dtFilter(@ByVal
opencv_core.UMat guide,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double sigmaSpatial,
double sigmaColor)
@Namespace(value="cv::ximgproc")
public static void dtFilter(@ByVal
opencv_core.GpuMat guide,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters)
@Namespace(value="cv::ximgproc")
public static void dtFilter(@ByVal
opencv_core.GpuMat guide,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double sigmaSpatial,
double sigmaColor)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.GuidedFilter createGuidedFilter(@ByVal opencv_core.Mat guide, int radius, double eps)
guide - guided image (or array of images) with up to 3 channels; if it has more than 3
channels then only the first 3 channels will be used.
radius - radius of Guided Filter.
eps - regularization term of Guided Filter. \f${eps}^2\f$ is similar to the sigma in the color
space into bilateralFilter.
For more details about Guided Filter parameters, see the original article \cite Kaiming10 .
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.GuidedFilter createGuidedFilter(@ByVal opencv_core.UMat guide, int radius, double eps)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.GuidedFilter createGuidedFilter(@ByVal opencv_core.GpuMat guide, int radius, double eps)
@Namespace(value="cv::ximgproc")
public static void guidedFilter(@ByVal
opencv_core.Mat guide,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int radius,
double eps,
int dDepth)
If you have multiple images to filter with the same guided image then use GuidedFilter interface to avoid extra computations on initialization stage.
guide - guided image (or array of images) with up to 3 channels; if it has more than 3
channels then only the first 3 channels will be used.
src - filtering image with any numbers of channels.
dst - output image.
radius - radius of Guided Filter.
eps - regularization term of Guided Filter. \f${eps}^2\f$ is similar to the sigma in the color
space into bilateralFilter.
dDepth - optional depth of the output image.
\sa bilateralFilter, dtFilter, amFilter
@Namespace(value="cv::ximgproc")
public static void guidedFilter(@ByVal
opencv_core.Mat guide,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int radius,
double eps)
@Namespace(value="cv::ximgproc")
public static void guidedFilter(@ByVal
opencv_core.UMat guide,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int radius,
double eps,
int dDepth)
@Namespace(value="cv::ximgproc")
public static void guidedFilter(@ByVal
opencv_core.UMat guide,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int radius,
double eps)
@Namespace(value="cv::ximgproc")
public static void guidedFilter(@ByVal
opencv_core.GpuMat guide,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int radius,
double eps,
int dDepth)
@Namespace(value="cv::ximgproc")
public static void guidedFilter(@ByVal
opencv_core.GpuMat guide,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int radius,
double eps)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.AdaptiveManifoldFilter createAMFilter(double sigma_s, double sigma_r, @Cast(value="bool") boolean adjust_outliers)
sigma_s - spatial standard deviation.
sigma_r - color space standard deviation, it is similar to the sigma in the color space into
bilateralFilter.
adjust_outliers - optional, specifies whether to perform the outliers adjustment operation (Eq. 9) in the
original paper.
For more details about Adaptive Manifold Filter parameters, see the original article \cite Gastal12 .
\note Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1] color range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same sigmas in bilateralFilter and dtFilter functions.
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.AdaptiveManifoldFilter createAMFilter(double sigma_s, double sigma_r)
@Namespace(value="cv::ximgproc")
public static void amFilter(@ByVal
opencv_core.Mat joint,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double sigma_s,
double sigma_r,
@Cast(value="bool")
boolean adjust_outliers)
joint - joint (also called as guided) image or array of images with any numbers of channels.
src - filtering image with any numbers of channels.
dst - output image.
sigma_s - spatial standard deviation.
sigma_r - color space standard deviation, it is similar to the sigma in the color space into
bilateralFilter.
adjust_outliers - optional, specifies whether to perform the outliers adjustment operation (Eq. 9) in the
original paper.
\note Joint images with CV_8U and CV_16U depth converted to images with CV_32F depth and [0; 1] color range before processing. Hence color space sigma sigma_r must be in [0; 1] range, unlike same sigmas in bilateralFilter and dtFilter functions. \sa bilateralFilter, dtFilter, guidedFilter
@Namespace(value="cv::ximgproc")
public static void amFilter(@ByVal
opencv_core.Mat joint,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double sigma_s,
double sigma_r)
@Namespace(value="cv::ximgproc")
public static void amFilter(@ByVal
opencv_core.UMat joint,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double sigma_s,
double sigma_r,
@Cast(value="bool")
boolean adjust_outliers)
@Namespace(value="cv::ximgproc")
public static void amFilter(@ByVal
opencv_core.UMat joint,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double sigma_s,
double sigma_r)
@Namespace(value="cv::ximgproc")
public static void amFilter(@ByVal
opencv_core.GpuMat joint,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double sigma_s,
double sigma_r,
@Cast(value="bool")
boolean adjust_outliers)
@Namespace(value="cv::ximgproc")
public static void amFilter(@ByVal
opencv_core.GpuMat joint,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double sigma_s,
double sigma_r)
@Namespace(value="cv::ximgproc")
public static void jointBilateralFilter(@ByVal
opencv_core.Mat joint,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
joint - Joint 8-bit or floating-point, 1-channel or 3-channel image.
src - Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint
image.
dst - Destination image of the same size and type as src .
d - Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
it is computed from sigmaSpace .
sigmaColor - Filter sigma in the color space. A larger value of the parameter means that
farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
larger areas of semi-equal color.
sigmaSpace - Filter sigma in the coordinate space. A larger value of the parameter means that
farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
proportional to sigmaSpace .
borderType - border mode used to extrapolate pixels outside of the image. \note bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.
\sa bilateralFilter, amFilter
@Namespace(value="cv::ximgproc")
public static void jointBilateralFilter(@ByVal
opencv_core.Mat joint,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace)
@Namespace(value="cv::ximgproc")
public static void jointBilateralFilter(@ByVal
opencv_core.UMat joint,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
@Namespace(value="cv::ximgproc")
public static void jointBilateralFilter(@ByVal
opencv_core.UMat joint,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace)
@Namespace(value="cv::ximgproc")
public static void jointBilateralFilter(@ByVal
opencv_core.GpuMat joint,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType)
@Namespace(value="cv::ximgproc")
public static void jointBilateralFilter(@ByVal
opencv_core.GpuMat joint,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int d,
double sigmaColor,
double sigmaSpace)
@Namespace(value="cv::ximgproc")
public static void bilateralTextureFilter(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg)
src - Source image whose depth is 8-bit UINT or 32-bit FLOAT
dst - Destination image of the same size and type as src.
fr - Radius of kernel to be used for filtering. It should be positive integer
numIter - Number of iterations of algorithm, It should be positive integer
sigmaAlpha - Controls the sharpness of the weight transition from edges to smooth/texture regions, where
a bigger value means sharper transition. When the value is negative, it is automatically calculated.
sigmaAvg - Range blur parameter for texture blurring. Larger value makes result to be more blurred. When the
value is negative, it is automatically calculated as described in the paper.
\sa rollingGuidanceFilter, bilateralFilter
@Namespace(value="cv::ximgproc")
public static void bilateralTextureFilter(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
@Namespace(value="cv::ximgproc")
public static void bilateralTextureFilter(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg)
@Namespace(value="cv::ximgproc")
public static void bilateralTextureFilter(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv::ximgproc")
public static void bilateralTextureFilter(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg)
@Namespace(value="cv::ximgproc")
public static void bilateralTextureFilter(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc")
public static void rollingGuidanceFilter(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType)
For more details, please see \cite zhang2014rolling
src - Source 8-bit or floating-point, 1-channel or 3-channel image.
dst - Destination image of the same size and type as src.
d - Diameter of each pixel neighborhood that is used during filtering. If it is non-positive,
it is computed from sigmaSpace .
sigmaColor - Filter sigma in the color space. A larger value of the parameter means that
farther colors within the pixel neighborhood (see sigmaSpace ) will be mixed together, resulting in
larger areas of semi-equal color.
sigmaSpace - Filter sigma in the coordinate space. A larger value of the parameter means that
farther pixels will influence each other as long as their colors are close enough (see sigmaColor ).
When d\>0 , it specifies the neighborhood size regardless of sigmaSpace . Otherwise, d is
proportional to sigmaSpace .
numOfIter - Number of iterations of joint edge-preserving filtering applied on the source image.
borderType - border mode used to extrapolate pixels outside of the image. \note rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
\sa jointBilateralFilter, bilateralFilter, amFilter
@Namespace(value="cv::ximgproc")
public static void rollingGuidanceFilter(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
@Namespace(value="cv::ximgproc")
public static void rollingGuidanceFilter(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType)
@Namespace(value="cv::ximgproc")
public static void rollingGuidanceFilter(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv::ximgproc")
public static void rollingGuidanceFilter(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType)
@Namespace(value="cv::ximgproc")
public static void rollingGuidanceFilter(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.FastGlobalSmootherFilter createFastGlobalSmootherFilter(@ByVal opencv_core.Mat guide, double lambda, double sigma_color, double lambda_attenuation, int num_iter)
guide - image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
lambda - parameter defining the amount of regularization
sigma_color - parameter, that is similar to color space sigma in bilateralFilter.
lambda_attenuation - internal parameter, defining how much lambda decreases after each iteration. Normally,
it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter - number of iterations used for filtering, 3 is usually enough.
For more details about Fast Global Smoother parameters, see the original paper \cite Min2014. However, please note that there are several differences. Lambda attenuation described in the paper is implemented a bit differently so do not expect the results to be identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to achieve the same effect. Also, in case of image filtering where source and guide image are the same, authors propose to dynamically update the guide image after each iteration. To maximize the performance this feature was not implemented here.
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.FastGlobalSmootherFilter createFastGlobalSmootherFilter(@ByVal opencv_core.Mat guide, double lambda, double sigma_color)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.FastGlobalSmootherFilter createFastGlobalSmootherFilter(@ByVal opencv_core.UMat guide, double lambda, double sigma_color, double lambda_attenuation, int num_iter)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.FastGlobalSmootherFilter createFastGlobalSmootherFilter(@ByVal opencv_core.UMat guide, double lambda, double sigma_color)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.FastGlobalSmootherFilter createFastGlobalSmootherFilter(@ByVal opencv_core.GpuMat guide, double lambda, double sigma_color, double lambda_attenuation, int num_iter)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.FastGlobalSmootherFilter createFastGlobalSmootherFilter(@ByVal opencv_core.GpuMat guide, double lambda, double sigma_color)
@Namespace(value="cv::ximgproc")
public static void fastGlobalSmootherFilter(@ByVal
opencv_core.Mat guide,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
guide - image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src - source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
dst - destination image.
lambda - parameter defining the amount of regularization
sigma_color - parameter, that is similar to color space sigma in bilateralFilter.
lambda_attenuation - internal parameter, defining how much lambda decreases after each iteration. Normally,
it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter - number of iterations used for filtering, 3 is usually enough.@Namespace(value="cv::ximgproc")
public static void fastGlobalSmootherFilter(@ByVal
opencv_core.Mat guide,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double lambda,
double sigma_color)
@Namespace(value="cv::ximgproc")
public static void fastGlobalSmootherFilter(@ByVal
opencv_core.UMat guide,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
@Namespace(value="cv::ximgproc")
public static void fastGlobalSmootherFilter(@ByVal
opencv_core.UMat guide,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double lambda,
double sigma_color)
@Namespace(value="cv::ximgproc")
public static void fastGlobalSmootherFilter(@ByVal
opencv_core.GpuMat guide,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter)
@Namespace(value="cv::ximgproc")
public static void fastGlobalSmootherFilter(@ByVal
opencv_core.GpuMat guide,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double lambda,
double sigma_color)
@Namespace(value="cv::ximgproc")
public static void l0Smooth(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double lambda,
double kappa)
src - source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
dst - destination image.
lambda - parameter defining the smooth term weight.
kappa - parameter defining the increasing factor of the weight of the gradient data term.
For more details about L0 Smoother, see the original paper \cite xu2011image.
@Namespace(value="cv::ximgproc")
public static void l0Smooth(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
@Namespace(value="cv::ximgproc")
public static void l0Smooth(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double lambda,
double kappa)
@Namespace(value="cv::ximgproc")
public static void l0Smooth(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv::ximgproc")
public static void l0Smooth(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double lambda,
double kappa)
@Namespace(value="cv::ximgproc")
public static void l0Smooth(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DisparityWLSFilter createDisparityWLSFilter(@opencv_core.Ptr opencv_calib3d.StereoMatcher matcher_left)
matcher_left - stereo matcher instance that will be used with the filter@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_calib3d.StereoMatcher createRightMatcher(@opencv_core.Ptr opencv_calib3d.StereoMatcher matcher_left)
matcher_left - main stereo matcher instance that will be used with the filter@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.DisparityWLSFilter createDisparityWLSFilterGeneric(@Cast(value="bool") boolean use_confidence)
use_confidence - filtering with confidence requires two disparity maps (for the left and right views) and is
approximately two times slower. However, quality is typically significantly better.@Namespace(value="cv::ximgproc") public static int readGT(@opencv_core.Str org.bytedeco.javacpp.BytePointer src_path, @ByVal opencv_core.Mat dst)
src_path - path to the image, containing ground-truth disparity map
dst - output disparity map, CV_16S depth
\result returns zero if successfully read the ground truth
@Namespace(value="cv::ximgproc") public static int readGT(@opencv_core.Str String src_path, @ByVal opencv_core.Mat dst)
@Namespace(value="cv::ximgproc") public static int readGT(@opencv_core.Str String src_path, @ByVal opencv_core.UMat dst)
@Namespace(value="cv::ximgproc") public static int readGT(@opencv_core.Str org.bytedeco.javacpp.BytePointer src_path, @ByVal opencv_core.UMat dst)
@Namespace(value="cv::ximgproc") public static int readGT(@opencv_core.Str org.bytedeco.javacpp.BytePointer src_path, @ByVal opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc") public static int readGT(@opencv_core.Str String src_path, @ByVal opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc")
public static double computeMSE(@ByVal
opencv_core.Mat GT,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Rect ROI)
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
\result returns mean square error between GT and src
@Namespace(value="cv::ximgproc")
public static double computeMSE(@ByVal
opencv_core.UMat GT,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.Rect ROI)
@Namespace(value="cv::ximgproc")
public static double computeMSE(@ByVal
opencv_core.GpuMat GT,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.Rect ROI)
@Namespace(value="cv::ximgproc")
public static double computeBadPixelPercent(@ByVal
opencv_core.Mat GT,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Rect ROI,
int thresh)
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
\result returns the percentage of "bad" pixels (pixels where the error exceeds thresh) between GT and src
@Namespace(value="cv::ximgproc")
public static double computeBadPixelPercent(@ByVal
opencv_core.Mat GT,
@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Rect ROI)
@Namespace(value="cv::ximgproc")
public static double computeBadPixelPercent(@ByVal
opencv_core.UMat GT,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.Rect ROI,
int thresh)
@Namespace(value="cv::ximgproc")
public static double computeBadPixelPercent(@ByVal
opencv_core.UMat GT,
@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.Rect ROI)
@Namespace(value="cv::ximgproc")
public static double computeBadPixelPercent(@ByVal
opencv_core.GpuMat GT,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.Rect ROI,
int thresh)
@Namespace(value="cv::ximgproc")
public static double computeBadPixelPercent(@ByVal
opencv_core.GpuMat GT,
@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.Rect ROI)
@Namespace(value="cv::ximgproc")
public static void getDisparityVis(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
double scale)
src - input disparity map (CV_16S depth)
dst - output visualization
scale - disparity map will be multiplied by this value for visualization@Namespace(value="cv::ximgproc")
public static void getDisparityVis(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst)
@Namespace(value="cv::ximgproc")
public static void getDisparityVis(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
double scale)
@Namespace(value="cv::ximgproc")
public static void getDisparityVis(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst)
@Namespace(value="cv::ximgproc")
public static void getDisparityVis(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
double scale)
@Namespace(value="cv::ximgproc")
public static void getDisparityVis(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.EdgeAwareInterpolator createEdgeAwareInterpolator()
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.RFFeatureGetter createRFFeatureGetter()
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.StructuredEdgeDetection createStructuredEdgeDetection(@opencv_core.Str org.bytedeco.javacpp.BytePointer model, @Const @opencv_core.Ptr opencv_ximgproc.RFFeatureGetter howToGetFeatures)
model - : name of the file where the model is storedhowToGetFeatures - : optional object inheriting from RFFeatureGetter.
You need it only if you would like to train your
own forest, pass NULL otherwise@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.StructuredEdgeDetection createStructuredEdgeDetection(@opencv_core.Str org.bytedeco.javacpp.BytePointer model)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.StructuredEdgeDetection createStructuredEdgeDetection(@opencv_core.Str String model, @Const @opencv_core.Ptr opencv_ximgproc.RFFeatureGetter howToGetFeatures)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.StructuredEdgeDetection createStructuredEdgeDetection(@opencv_core.Str String model)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSEEDS createSuperpixelSEEDS(int image_width, int image_height, int image_channels, int num_superpixels, int num_levels, int prior, int histogram_bins, @Cast(value="bool") boolean double_step)
image_width - Image width.image_height - Image height.image_channels - Number of channels of the image.num_superpixels - Desired number of superpixels. Note that the actual number may be smaller
due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to
get the actual number.num_levels - Number of block levels. The more levels, the more accurate is the segmentation,
but needs more memory and CPU time.prior - enable 3x3 shape smoothing term if \>0. A larger value leads to smoother shapes. prior
must be in the range [0, 5].histogram_bins - Number of histogram bins.double_step - If true, iterate each block level twice for higher accuracy.
The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and double_step.
The number of levels in num_levels defines the amount of block levels that the algorithm use in the optimization. The initialization is a grid, in which the superpixels are equally distributed through the width and the height of the image. The larger blocks correspond to the superpixel size, and the levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels, recursively until the smaller block level. An example of initialization of 4 block levels is illustrated in the following figure.

@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSEEDS createSuperpixelSEEDS(int image_width, int image_height, int image_channels, int num_superpixels, int num_levels)
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.GraphSegmentation createGraphSegmentation(double sigma, float k, int min_size)
sigma - The sigma parameter, used to smooth imagek - The k parameter of the algorithmmin_size - The minimum size of segments@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.GraphSegmentation createGraphSegmentation()
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyColor createSelectiveSearchSegmentationStrategyColor()
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategySize createSelectiveSearchSegmentationStrategySize()
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyTexture createSelectiveSearchSegmentationStrategyTexture()
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyFill createSelectiveSearchSegmentationStrategyFill()
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple()
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(@opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s1)
s1 - The first strategy.
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(@opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s1, @opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s2)
s1 - The first strategy. s2 - The second strategy.
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(@opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s1, @opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s2, @opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s3)
s1 - The first strategy. s2 - The second strategy. s3 - The third strategy.
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple(@opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s1, @opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s2, @opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s3, @opencv_core.Ptr opencv_ximgproc.SelectiveSearchSegmentationStrategy s4)
s1 - The first strategy. s2 - The second strategy. s3 - The third strategy. s4 - The fourth strategy.
@Namespace(value="cv::ximgproc::segmentation") @opencv_core.Ptr public static opencv_ximgproc.SelectiveSearchSegmentation createSelectiveSearchSegmentation()
@Namespace(value="cv::ximgproc")
public static void FastHoughTransform(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew)
dst - The destination image, result of transformation. src - The source (input) image. dstMatDepth - The depth of the destination image. op - The operation to be applied, see cv::HoughOp. angleRange - The part of Hough space to calculate, see cv::AngleRangeOption. makeSkew - Specifies to do or not to do image skewing, see cv::HoughDeskewOption.
The function calculates the fast Hough transform for full, half or quarter
range of angles.
@Namespace(value="cv::ximgproc")
public static void FastHoughTransform(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int dstMatDepth)
@Namespace(value="cv::ximgproc")
public static void FastHoughTransform(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew)
@Namespace(value="cv::ximgproc")
public static void FastHoughTransform(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int dstMatDepth)
@Namespace(value="cv::ximgproc")
public static void FastHoughTransform(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew)
@Namespace(value="cv::ximgproc")
public static void FastHoughTransform(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int dstMatDepth)
@Namespace(value="cv::ximgproc") @ByVal public static opencv_core.Scalar4i HoughPoint2Line(@Const @ByRef opencv_core.Point houghPoint, @ByVal opencv_core.Mat srcImgInfo, int angleRange, int makeSkew, int rules)
houghPoint - Point in Hough space. srcImgInfo - The source (input) image of the Hough transform. angleRange - The part of Hough space where the point is situated, see cv::AngleRangeOption. makeSkew - Specifies to do or not to do image skewing, see cv::HoughDeskewOption. rules - Specifies strictness of line segment calculation, see cv::RulesOption.
\retval [Vec4i] Coordinates of the line segment corresponding to the point in Hough space.
\remarks If the rules parameter is set to RO_STRICT,
then the returned line is cut along the border of the source image.
\remarks If the rules parameter is set to RO_WEAK, then in the case of a point that belongs
to an incorrect part of the Hough image, the returned line will not intersect the source image.
The function calculates the coordinates of the line segment corresponding to a point in Hough space.
@Namespace(value="cv::ximgproc") @ByVal public static opencv_core.Scalar4i HoughPoint2Line(@Const @ByRef opencv_core.Point houghPoint, @ByVal opencv_core.Mat srcImgInfo)
@Namespace(value="cv::ximgproc") @ByVal public static opencv_core.Scalar4i HoughPoint2Line(@Const @ByRef opencv_core.Point houghPoint, @ByVal opencv_core.UMat srcImgInfo, int angleRange, int makeSkew, int rules)
@Namespace(value="cv::ximgproc") @ByVal public static opencv_core.Scalar4i HoughPoint2Line(@Const @ByRef opencv_core.Point houghPoint, @ByVal opencv_core.UMat srcImgInfo)
@Namespace(value="cv::ximgproc") @ByVal public static opencv_core.Scalar4i HoughPoint2Line(@Const @ByRef opencv_core.Point houghPoint, @ByVal opencv_core.GpuMat srcImgInfo, int angleRange, int makeSkew, int rules)
@Namespace(value="cv::ximgproc") @ByVal public static opencv_core.Scalar4i HoughPoint2Line(@Const @ByRef opencv_core.Point houghPoint, @ByVal opencv_core.GpuMat srcImgInfo)
@Namespace(value="cv::ximgproc")
public static void covarianceEstimation(@ByVal
opencv_core.Mat src,
@ByVal
opencv_core.Mat dst,
int windowRows,
int windowCols)
src - The source image. The input image must be of a complex type. dst - The destination estimated covariance matrix. The output matrix will be of size (windowRows*windowCols, windowRows*windowCols). windowRows - The number of rows in the window. windowCols - The number of cols in the window.
The window size parameters control the accuracy of the estimation.
The sliding window moves over the entire image from the top-left corner
to the bottom right corner. Each location of the window represents a sample.
If the window is the size of the image, then this gives the exact covariance matrix.
For all other cases, the sizes of the window will impact the number of samples
and the number of elements in the estimated covariance matrix.
@Namespace(value="cv::ximgproc")
public static void covarianceEstimation(@ByVal
opencv_core.UMat src,
@ByVal
opencv_core.UMat dst,
int windowRows,
int windowCols)
@Namespace(value="cv::ximgproc")
public static void covarianceEstimation(@ByVal
opencv_core.GpuMat src,
@ByVal
opencv_core.GpuMat dst,
int windowRows,
int windowCols)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSLIC createSuperpixelSLIC(@ByVal opencv_core.Mat image, int algorithm, int region_size, float ruler)
image - Image to segment. algorithm - Chooses the algorithm variant to use:
SLIC segments image using a desired region_size, and in addition SLICO will optimize using adaptive compactness factor,
while MSLIC will optimize using manifold methods resulting in more content-sensitive superpixels. region_size - Chooses an average superpixel size measured in pixels. ruler - Chooses the enforcement of the superpixel smoothness factor.
The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CieLAB color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.

@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSLIC createSuperpixelSLIC(@ByVal opencv_core.Mat image)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSLIC createSuperpixelSLIC(@ByVal opencv_core.UMat image, int algorithm, int region_size, float ruler)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSLIC createSuperpixelSLIC(@ByVal opencv_core.UMat image)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSLIC createSuperpixelSLIC(@ByVal opencv_core.GpuMat image, int algorithm, int region_size, float ruler)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelSLIC createSuperpixelSLIC(@ByVal opencv_core.GpuMat image)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelLSC createSuperpixelLSC(@ByVal opencv_core.Mat image, int region_size, float ratio)
image - Image to segment. region_size - Chooses an average superpixel size measured in pixels. ratio - Chooses the enforcement of the superpixel compactness factor.
The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future computing iterations over the given image. An example of LSC is illustrated in the following picture. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur with a small 3 x 3 kernel and an additional conversion into the CieLAB color space.

@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelLSC createSuperpixelLSC(@ByVal opencv_core.Mat image)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelLSC createSuperpixelLSC(@ByVal opencv_core.UMat image, int region_size, float ratio)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelLSC createSuperpixelLSC(@ByVal opencv_core.UMat image)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelLSC createSuperpixelLSC(@ByVal opencv_core.GpuMat image, int region_size, float ratio)
@Namespace(value="cv::ximgproc") @opencv_core.Ptr public static opencv_ximgproc.SuperpixelLSC createSuperpixelLSC(@ByVal opencv_core.GpuMat image)
Copyright © 2018. All rights reserved.