OpenCV for Unity 2.6.3
Enox Software / Please refer to the official OpenCV documentation ( http://docs.opencv.org/4.10.0/index.html ) for details of each method's arguments.
OpenCVForUnity.XimgprocModule.Ximgproc Class Reference

Static Public Member Functions

static void niBlackThreshold (Mat _src, Mat _dst, double maxValue, int type, int blockSize, double k, int binarizationMethod, double r)
 Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.
 
static void niBlackThreshold (Mat _src, Mat _dst, double maxValue, int type, int blockSize, double k, int binarizationMethod)
 Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.
 
static void niBlackThreshold (Mat _src, Mat _dst, double maxValue, int type, int blockSize, double k)
 Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.
 
static void thinning (Mat src, Mat dst, int thinningType)
Applies a binary blob thinning operation to achieve a skeletonization of the input image.
 
static void thinning (Mat src, Mat dst)
Applies a binary blob thinning operation to achieve a skeletonization of the input image.
 
static void anisotropicDiffusion (Mat src, Mat dst, float alpha, float K, int niters)
 Performs anisotropic diffusion on an image.
 
static void createQuaternionImage (Mat img, Mat qimg)
 creates a quaternion image.
 
static void qconj (Mat qimg, Mat qcimg)
 calculates conjugate of a quaternion image.
 
static void qunitary (Mat qimg, Mat qnimg)
 divides each element by its modulus.
 
static void qmultiply (Mat src1, Mat src2, Mat dst)
 Calculates the per-element quaternion product of two arrays.
 
static void qdft (Mat img, Mat qimg, int flags, bool sideLeft)
 Performs a forward or inverse Discrete quaternion Fourier transform of a 2D quaternion array.
 
static void colorMatchTemplate (Mat img, Mat templ, Mat result)
 Compares a color template against overlapped color image regions.
 
static void GradientDericheY (Mat op, Mat dst, double alpha, double omega)
 Applies Y Deriche filter to an image.
 
static void GradientDericheX (Mat op, Mat dst, double alpha, double omega)
 Applies X Deriche filter to an image.
 
static DisparityWLSFilter createDisparityWLSFilter (StereoMatcher matcher_left)
 Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant filter parameters automatically based on the matcher instance. Currently supports only StereoBM and StereoSGBM.
 
static StereoMatcher createRightMatcher (StereoMatcher matcher_left)
 Convenience method to set up the matcher for computing the right-view disparity map that is required in case of filtering with confidence.
 
static DisparityWLSFilter createDisparityWLSFilterGeneric (bool use_confidence)
More generic factory method, create instance of DisparityWLSFilter and execute basic initialization routines. When using this method you will need to set up the ROI, matchers and other parameters by yourself.
 
static int readGT (string src_path, Mat dst)
 Function for reading ground truth disparity maps. Supports basic Middlebury and MPI-Sintel formats. Note that the resulting disparity map is scaled by 16.
 
static double computeMSE (Mat GT, Mat src, Rect ROI)
 Function for computing mean square error for disparity maps.
 
static double computeBadPixelPercent (Mat GT, Mat src, Rect ROI, int thresh)
 Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)
 
static double computeBadPixelPercent (Mat GT, Mat src, Rect ROI)
 Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)
 
static void getDisparityVis (Mat src, Mat dst, double scale)
 Function for creating a disparity map visualization (clamped CV_8U image)
 
static void getDisparityVis (Mat src, Mat dst)
 Function for creating a disparity map visualization (clamped CV_8U image)
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag, float edgeMergeThr, float clusterMinMag, float maxAspectRatio, float minBoxArea, float gamma, float kappa)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag, float edgeMergeThr, float clusterMinMag, float maxAspectRatio, float minBoxArea, float gamma)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag, float edgeMergeThr, float clusterMinMag, float maxAspectRatio, float minBoxArea)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag, float edgeMergeThr, float clusterMinMag, float maxAspectRatio)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag, float edgeMergeThr, float clusterMinMag)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag, float edgeMergeThr)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes, float edgeMinMag)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore, int maxBoxes)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta, float minScore)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta, float eta)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha, float beta)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes (float alpha)
Creates an EdgeBoxes object.
 
static EdgeBoxes createEdgeBoxes ()
Creates an EdgeBoxes object.
 
static void edgePreservingFilter (Mat src, Mat dst, int d, double threshold)
 Smoothes an image using the Edge-Preserving filter.
 
static EdgeDrawing createEdgeDrawing ()
Creates a smart pointer to an EdgeDrawing object and initializes it.
 
static DTFilter createDTFilter (Mat guide, double sigmaSpatial, double sigmaColor, int mode, int numIters)
 Factory method, create instance of DTFilter and produce initialization routines.
 
static DTFilter createDTFilter (Mat guide, double sigmaSpatial, double sigmaColor, int mode)
 Factory method, create instance of DTFilter and produce initialization routines.
 
static DTFilter createDTFilter (Mat guide, double sigmaSpatial, double sigmaColor)
 Factory method, create instance of DTFilter and produce initialization routines.
 
static void dtFilter (Mat guide, Mat src, Mat dst, double sigmaSpatial, double sigmaColor, int mode, int numIters)
 Simple one-line Domain Transform filter call. If you have multiple images to filter with the same guided image then use DTFilter interface to avoid extra computations on initialization stage.
 
static void dtFilter (Mat guide, Mat src, Mat dst, double sigmaSpatial, double sigmaColor, int mode)
 Simple one-line Domain Transform filter call. If you have multiple images to filter with the same guided image then use DTFilter interface to avoid extra computations on initialization stage.
 
static void dtFilter (Mat guide, Mat src, Mat dst, double sigmaSpatial, double sigmaColor)
 Simple one-line Domain Transform filter call. If you have multiple images to filter with the same guided image then use DTFilter interface to avoid extra computations on initialization stage.
 
static GuidedFilter createGuidedFilter (Mat guide, int radius, double eps, double scale)
 Factory method, create instance of GuidedFilter and produce initialization routines.
 
static GuidedFilter createGuidedFilter (Mat guide, int radius, double eps)
 Factory method, create instance of GuidedFilter and produce initialization routines.
 
static void guidedFilter (Mat guide, Mat src, Mat dst, int radius, double eps, int dDepth, double scale)
 Simple one-line (Fast) Guided Filter call.
 
static void guidedFilter (Mat guide, Mat src, Mat dst, int radius, double eps, int dDepth)
 Simple one-line (Fast) Guided Filter call.
 
static void guidedFilter (Mat guide, Mat src, Mat dst, int radius, double eps)
 Simple one-line (Fast) Guided Filter call.
 
static AdaptiveManifoldFilter createAMFilter (double sigma_s, double sigma_r, bool adjust_outliers)
 Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.
 
static AdaptiveManifoldFilter createAMFilter (double sigma_s, double sigma_r)
 Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.
 
static void amFilter (Mat joint, Mat src, Mat dst, double sigma_s, double sigma_r, bool adjust_outliers)
 Simple one-line Adaptive Manifold Filter call.
 
static void amFilter (Mat joint, Mat src, Mat dst, double sigma_s, double sigma_r)
 Simple one-line Adaptive Manifold Filter call.
 
static void jointBilateralFilter (Mat joint, Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int borderType)
 Applies the joint bilateral filter to an image.
 
static void jointBilateralFilter (Mat joint, Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace)
 Applies the joint bilateral filter to an image.
 
static void bilateralTextureFilter (Mat src, Mat dst, int fr, int numIter, double sigmaAlpha, double sigmaAvg)
Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].
 
static void bilateralTextureFilter (Mat src, Mat dst, int fr, int numIter, double sigmaAlpha)
Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].
 
static void bilateralTextureFilter (Mat src, Mat dst, int fr, int numIter)
Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].
 
static void bilateralTextureFilter (Mat src, Mat dst, int fr)
Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].
 
static void bilateralTextureFilter (Mat src, Mat dst)
Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].
 
static void rollingGuidanceFilter (Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int numOfIter, int borderType)
 Applies the rolling guidance filter to an image.
 
static void rollingGuidanceFilter (Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace, int numOfIter)
 Applies the rolling guidance filter to an image.
 
static void rollingGuidanceFilter (Mat src, Mat dst, int d, double sigmaColor, double sigmaSpace)
 Applies the rolling guidance filter to an image.
 
static void rollingGuidanceFilter (Mat src, Mat dst, int d, double sigmaColor)
 Applies the rolling guidance filter to an image.
 
static void rollingGuidanceFilter (Mat src, Mat dst, int d)
 Applies the rolling guidance filter to an image.
 
static void rollingGuidanceFilter (Mat src, Mat dst)
 Applies the rolling guidance filter to an image.
 
static FastBilateralSolverFilter createFastBilateralSolverFilter (Mat guide, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda, int num_iter, double max_tol)
 Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 
static FastBilateralSolverFilter createFastBilateralSolverFilter (Mat guide, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda, int num_iter)
 Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 
static FastBilateralSolverFilter createFastBilateralSolverFilter (Mat guide, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda)
 Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 
static FastBilateralSolverFilter createFastBilateralSolverFilter (Mat guide, double sigma_spatial, double sigma_luma, double sigma_chroma)
 Factory method, create instance of FastBilateralSolverFilter and execute the initialization routines.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda, int num_iter, double max_tol)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda, int num_iter)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst, double sigma_spatial, double sigma_luma, double sigma_chroma, double lambda)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst, double sigma_spatial, double sigma_luma, double sigma_chroma)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst, double sigma_spatial, double sigma_luma)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst, double sigma_spatial)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static void fastBilateralSolverFilter (Mat guide, Mat src, Mat confidence, Mat dst)
 Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide then use FastBilateralSolverFilter interface to avoid extra computations.
 
static FastGlobalSmootherFilter createFastGlobalSmootherFilter (Mat guide, double lambda, double sigma_color, double lambda_attenuation, int num_iter)
 Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
 
static FastGlobalSmootherFilter createFastGlobalSmootherFilter (Mat guide, double lambda, double sigma_color, double lambda_attenuation)
 Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
 
static FastGlobalSmootherFilter createFastGlobalSmootherFilter (Mat guide, double lambda, double sigma_color)
 Factory method, create instance of FastGlobalSmootherFilter and execute the initialization routines.
 
static void fastGlobalSmootherFilter (Mat guide, Mat src, Mat dst, double lambda, double sigma_color, double lambda_attenuation, int num_iter)
 Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same guide then use FastGlobalSmootherFilter interface to avoid extra computations.
 
static void fastGlobalSmootherFilter (Mat guide, Mat src, Mat dst, double lambda, double sigma_color, double lambda_attenuation)
 Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same guide then use FastGlobalSmootherFilter interface to avoid extra computations.
 
static void fastGlobalSmootherFilter (Mat guide, Mat src, Mat dst, double lambda, double sigma_color)
 Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same guide then use FastGlobalSmootherFilter interface to avoid extra computations.
 
static void l0Smooth (Mat src, Mat dst, double lambda, double kappa)
 Global image smoothing via L0 gradient minimization.
 
static void l0Smooth (Mat src, Mat dst, double lambda)
 Global image smoothing via L0 gradient minimization.
 
static void l0Smooth (Mat src, Mat dst)
 Global image smoothing via L0 gradient minimization.
 
static void covarianceEstimation (Mat src, Mat dst, int windowRows, int windowCols)
Computes the estimated covariance matrix of an image using the sliding window formulation.
 
static void FastHoughTransform (Mat src, Mat dst, int dstMatDepth, int angleRange, int op, int makeSkew)
 Calculates 2D Fast Hough transform of an image.
 
static void FastHoughTransform (Mat src, Mat dst, int dstMatDepth, int angleRange, int op)
 Calculates 2D Fast Hough transform of an image.
 
static void FastHoughTransform (Mat src, Mat dst, int dstMatDepth, int angleRange)
 Calculates 2D Fast Hough transform of an image.
 
static void FastHoughTransform (Mat src, Mat dst, int dstMatDepth)
 Calculates 2D Fast Hough transform of an image.
 
static FastLineDetector createFastLineDetector (int length_threshold, float distance_threshold, double canny_th1, double canny_th2, int canny_aperture_size, bool do_merge)
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static FastLineDetector createFastLineDetector (int length_threshold, float distance_threshold, double canny_th1, double canny_th2, int canny_aperture_size)
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static FastLineDetector createFastLineDetector (int length_threshold, float distance_threshold, double canny_th1, double canny_th2)
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static FastLineDetector createFastLineDetector (int length_threshold, float distance_threshold, double canny_th1)
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static FastLineDetector createFastLineDetector (int length_threshold, float distance_threshold)
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static FastLineDetector createFastLineDetector (int length_threshold)
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static FastLineDetector createFastLineDetector ()
 Creates a smart pointer to a FastLineDetector object and initializes it.
 
static void findEllipses (Mat image, Mat ellipses, float scoreThreshold, float reliabilityThreshold, float centerDistanceThreshold)
Finds ellipses quickly in an image using projective invariant pruning.
 
static void findEllipses (Mat image, Mat ellipses, float scoreThreshold, float reliabilityThreshold)
Finds ellipses quickly in an image using projective invariant pruning.
 
static void findEllipses (Mat image, Mat ellipses, float scoreThreshold)
Finds ellipses quickly in an image using projective invariant pruning.
 
static void findEllipses (Mat image, Mat ellipses)
Finds ellipses quickly in an image using projective invariant pruning.
 
static void fourierDescriptor (Mat src, Mat dst, int nbElt, int nbFD)
Fourier descriptors for planar closed curves.
 
static void fourierDescriptor (Mat src, Mat dst, int nbElt)
Fourier descriptors for planar closed curves.
 
static void fourierDescriptor (Mat src, Mat dst)
Fourier descriptors for planar closed curves.
 
static void transformFD (Mat src, Mat t, Mat dst, bool fdContour)
 transform a contour
 
static void transformFD (Mat src, Mat t, Mat dst)
 transform a contour
 
static void contourSampling (Mat src, Mat _out, int nbElt)
Contour sampling.
 
static ContourFitting createContourFitting (int ctr, int fd)
 create ContourFitting algorithm object
 
static ContourFitting createContourFitting (int ctr)
 create ContourFitting algorithm object
 
static ContourFitting createContourFitting ()
 create ContourFitting algorithm object
 
static SuperpixelLSC createSuperpixelLSC (Mat image, int region_size, float ratio)
 Class implementing the LSC (Linear Spectral Clustering) superpixels.
 
static SuperpixelLSC createSuperpixelLSC (Mat image, int region_size)
 Class implementing the LSC (Linear Spectral Clustering) superpixels.
 
static SuperpixelLSC createSuperpixelLSC (Mat image)
 Class implementing the LSC (Linear Spectral Clustering) superpixels.
 
static void PeiLinNormalization (Mat I, Mat T)
 
static void RadonTransform (Mat src, Mat dst, double theta, double start_angle, double end_angle, bool crop, bool norm)
 Calculate Radon Transform of an image.
 
static void RadonTransform (Mat src, Mat dst, double theta, double start_angle, double end_angle, bool crop)
 Calculate Radon Transform of an image.
 
static void RadonTransform (Mat src, Mat dst, double theta, double start_angle, double end_angle)
 Calculate Radon Transform of an image.
 
static void RadonTransform (Mat src, Mat dst, double theta, double start_angle)
 Calculate Radon Transform of an image.
 
static void RadonTransform (Mat src, Mat dst, double theta)
 Calculate Radon Transform of an image.
 
static void RadonTransform (Mat src, Mat dst)
 Calculate Radon Transform of an image.
 
static ScanSegment createScanSegment (int image_width, int image_height, int num_superpixels, int slices, bool merge_small)
 Initializes a ScanSegment object.
 
static ScanSegment createScanSegment (int image_width, int image_height, int num_superpixels, int slices)
 Initializes a ScanSegment object.
 
static ScanSegment createScanSegment (int image_width, int image_height, int num_superpixels)
 Initializes a ScanSegment object.
 
static SuperpixelSEEDS createSuperpixelSEEDS (int image_width, int image_height, int image_channels, int num_superpixels, int num_levels, int prior, int histogram_bins, bool double_step)
 Initializes a SuperpixelSEEDS object.
 
static SuperpixelSEEDS createSuperpixelSEEDS (int image_width, int image_height, int image_channels, int num_superpixels, int num_levels, int prior, int histogram_bins)
 Initializes a SuperpixelSEEDS object.
 
static SuperpixelSEEDS createSuperpixelSEEDS (int image_width, int image_height, int image_channels, int num_superpixels, int num_levels, int prior)
 Initializes a SuperpixelSEEDS object.
 
static SuperpixelSEEDS createSuperpixelSEEDS (int image_width, int image_height, int image_channels, int num_superpixels, int num_levels)
 Initializes a SuperpixelSEEDS object.
 
static GraphSegmentation createGraphSegmentation (double sigma, float k, int min_size)
Creates a graph-based segmentor.
 
static GraphSegmentation createGraphSegmentation (double sigma, float k)
Creates a graph-based segmentor.
 
static GraphSegmentation createGraphSegmentation (double sigma)
Creates a graph-based segmentor.
 
static GraphSegmentation createGraphSegmentation ()
Creates a graph-based segmentor.
 
static SelectiveSearchSegmentationStrategyColor createSelectiveSearchSegmentationStrategyColor ()
 Create a new color-based strategy.
 
static SelectiveSearchSegmentationStrategySize createSelectiveSearchSegmentationStrategySize ()
 Create a new size-based strategy.
 
static SelectiveSearchSegmentationStrategyTexture createSelectiveSearchSegmentationStrategyTexture ()
Create a new texture-based strategy.
 
static SelectiveSearchSegmentationStrategyFill createSelectiveSearchSegmentationStrategyFill ()
 Create a new fill-based strategy.
 
static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple ()
 Create a new multiple strategy.
 
static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple (SelectiveSearchSegmentationStrategy s1)
Create a new multiple strategy and set one substrategy.
 
static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple (SelectiveSearchSegmentationStrategy s1, SelectiveSearchSegmentationStrategy s2)
Create a new multiple strategy and set two substrategies, with equal weights.
 
static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple (SelectiveSearchSegmentationStrategy s1, SelectiveSearchSegmentationStrategy s2, SelectiveSearchSegmentationStrategy s3)
Create a new multiple strategy and set three substrategies, with equal weights.
 
static SelectiveSearchSegmentationStrategyMultiple createSelectiveSearchSegmentationStrategyMultiple (SelectiveSearchSegmentationStrategy s1, SelectiveSearchSegmentationStrategy s2, SelectiveSearchSegmentationStrategy s3, SelectiveSearchSegmentationStrategy s4)
Create a new multiple strategy and set four substrategies, with equal weights.
 
static SelectiveSearchSegmentation createSelectiveSearchSegmentation ()
 Create a new SelectiveSearchSegmentation class.
 
static SuperpixelSLIC createSuperpixelSLIC (Mat image, int algorithm, int region_size, float ruler)
 Initialize a SuperpixelSLIC object.
 
static SuperpixelSLIC createSuperpixelSLIC (Mat image, int algorithm, int region_size)
 Initialize a SuperpixelSLIC object.
 
static SuperpixelSLIC createSuperpixelSLIC (Mat image, int algorithm)
 Initialize a SuperpixelSLIC object.
 
static SuperpixelSLIC createSuperpixelSLIC (Mat image)
 Initialize a SuperpixelSLIC object.
 
static EdgeAwareInterpolator createEdgeAwareInterpolator ()
 Factory method that creates an instance of the EdgeAwareInterpolator.
 
static RICInterpolator createRICInterpolator ()
 Factory method that creates an instance of the RICInterpolator.
 
static RFFeatureGetter createRFFeatureGetter ()
 
static StructuredEdgeDetection createStructuredEdgeDetection (string model, RFFeatureGetter howToGetFeatures)
 
static StructuredEdgeDetection createStructuredEdgeDetection (string model)
 
static void weightedMedianFilter (Mat joint, Mat src, Mat dst, int r, double sigma, int weightType, Mat mask)
 Applies weighted median filter to an image.
 
static void weightedMedianFilter (Mat joint, Mat src, Mat dst, int r, double sigma, int weightType)
 Applies weighted median filter to an image.
 
static void weightedMedianFilter (Mat joint, Mat src, Mat dst, int r, double sigma)
 Applies weighted median filter to an image.
 
static void weightedMedianFilter (Mat joint, Mat src, Mat dst, int r)
 Applies weighted median filter to an image.
 
static double computeMSE (Mat GT, Mat src, in Vec4i ROI)
 Function for computing mean square error for disparity maps.
 
static double computeBadPixelPercent (Mat GT, Mat src, in Vec4i ROI, int thresh)
 Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)
 
static double computeBadPixelPercent (Mat GT, Mat src, in Vec4i ROI)
 Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)
 
static double computeMSE (Mat GT, Mat src, in(int x, int y, int width, int height) ROI)
 Function for computing mean square error for disparity maps.
 
static double computeBadPixelPercent (Mat GT, Mat src, in(int x, int y, int width, int height) ROI, int thresh)
 Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)
 
static double computeBadPixelPercent (Mat GT, Mat src, in(int x, int y, int width, int height) ROI)
 Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)
 

Static Public Attributes

const int ARO_0_45 = 0
 
const int ARO_45_90 = 1
 
const int ARO_90_135 = 2
 
const int ARO_315_0 = 3
 
const int ARO_315_45 = 4
 
const int ARO_45_135 = 5
 
const int ARO_315_135 = 6
 
const int ARO_CTR_HOR = 7
 
const int ARO_CTR_VER = 8
 
const int DTF_NC = 0
 
const int DTF_IC = 1
 
const int DTF_RF = 2
 
const int GUIDED_FILTER = 3
 
const int AM_FILTER = 4
 
const int HDO_RAW = 0
 
const int HDO_DESKEW = 1
 
const int FHT_MIN = 0
 
const int FHT_MAX = 1
 
const int FHT_ADD = 2
 
const int FHT_AVE = 3
 
const int BINARIZATION_NIBLACK = 0
 
const int BINARIZATION_SAUVOLA = 1
 
const int BINARIZATION_WOLF = 2
 
const int BINARIZATION_NICK = 3
 
const int SLIC = 100
 
const int SLICO = 101
 
const int MSLIC = 102
 
const int THINNING_ZHANGSUEN = 0
 
const int THINNING_GUOHALL = 1
 
const int WMF_EXP = 1
 
const int WMF_IV1 = 1 << 1
 
const int WMF_IV2 = 1 << 2
 
const int WMF_COS = 1 << 3
 
const int WMF_JAC = 1 << 4
 
const int WMF_OFF = 1 << 5
 

Member Function Documentation

◆ amFilter() [1/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.amFilter ( Mat joint,
Mat src,
Mat dst,
double sigma_s,
double sigma_r )
static

Simple one-line Adaptive Manifold Filter call.

Parameters
joint - joint (also called guided) image or array of images with any number of channels.
src - filtering image with any number of channels.
dst - output image.
sigma_s - spatial standard deviation.
sigma_r - color space standard deviation; it is similar to the sigma in the color space of bilateralFilter.
adjust_outliers - optional, specifies whether to perform the outlier adjustment operation (Eq. 9 in the original paper).
Note
Joint images with CV_8U and CV_16U depth are converted to images with CV_32F depth and [0; 1] color range before processing. Hence the color space sigma sigma_r must be in the [0; 1] range, unlike the same sigmas in the bilateralFilter and dtFilter functions.
See also
bilateralFilter, dtFilter, guidedFilter

◆ amFilter() [2/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.amFilter ( Mat joint,
Mat src,
Mat dst,
double sigma_s,
double sigma_r,
bool adjust_outliers )
static

Simple one-line Adaptive Manifold Filter call.

Parameters
joint - joint (also called guided) image or array of images with any number of channels.
src - filtering image with any number of channels.
dst - output image.
sigma_s - spatial standard deviation.
sigma_r - color space standard deviation; it is similar to the sigma in the color space of bilateralFilter.
adjust_outliers - optional, specifies whether to perform the outlier adjustment operation (Eq. 9 in the original paper).
Note
Joint images with CV_8U and CV_16U depth are converted to images with CV_32F depth and [0; 1] color range before processing. Hence the color space sigma sigma_r must be in the [0; 1] range, unlike the same sigmas in the bilateralFilter and dtFilter functions.
See also
bilateralFilter, dtFilter, guidedFilter
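
A minimal C# usage sketch (not part of the generated reference): it calls the documented amFilter overload on a synthetic 8-bit, 3-channel Mat, using the image itself as the joint image. The OpenCVForUnity.CoreModule namespace and the parameter values are assumptions made for illustration.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class AmFilterExample
{
    public static Mat Run()
    {
        // Synthetic 320x240 image; in a real project this would come from a Texture2D.
        Mat src = new Mat(240, 320, CvType.CV_8UC3, new Scalar(128, 128, 128));
        Mat dst = new Mat();
        // sigma_r must lie in [0; 1], because 8-bit input is rescaled to [0; 1] internally.
        Ximgproc.amFilter(src, src, dst, 16.0, 0.2);
        return dst;
    }
}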

◆ anisotropicDiffusion()

static void OpenCVForUnity.XimgprocModule.Ximgproc.anisotropicDiffusion ( Mat src,
Mat dst,
float alpha,
float K,
int niters )
static

Performs anisotropic diffusion on an image.

The function applies Perona-Malik anisotropic diffusion to an image. This is the solution to the partial differential equation:

\[{\frac {\partial I}{\partial t}}={\mathrm {div}}\left(c(x,y,t)\nabla I\right)=\nabla c\cdot \nabla I+c(x,y,t)\Delta I\]

Suggested functions for c(x,y,t) are:

\[c\left(\|\nabla I\|\right)=e^{{-\left(\|\nabla I\|/K\right)^{2}}}\]

or

\[ c\left(\|\nabla I\|\right)={\frac {1}{1+\left({\frac {\|\nabla I\|}{K}}\right)^{2}}} \]

Parameters
src - Source image with 3 channels.
dst - Destination image of the same size and the same number of channels as src.
alpha - The amount of time to step forward by on each iteration (normally, it's between 0 and 1).
K - Sensitivity to the edges.
niters - The number of iterations.
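
A small C# sketch of the call above (not from the original reference); the input is a synthetic CV_8UC3 Mat, the alpha/K/niters values are illustrative assumptions, and OpenCVForUnity.CoreModule is assumed for Mat and CvType.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class AnisotropicDiffusionExample
{
    public static Mat Run()
    {
        Mat src = new Mat(240, 320, CvType.CV_8UC3, new Scalar(100, 150, 200));
        Mat dst = new Mat();
        // alpha = time step per iteration (0..1], K = edge sensitivity, 5 iterations.
        Ximgproc.anisotropicDiffusion(src, dst, 0.15f, 20f, 5);
        return dst;
    }
}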

◆ bilateralTextureFilter() [1/5]

static void OpenCVForUnity.XimgprocModule.Ximgproc.bilateralTextureFilter ( Mat src,
Mat dst )
static

Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].

Parameters
src - Source image whose depth is 8-bit UINT or 32-bit FLOAT.
dst - Destination image of the same size and type as src.
fr - Radius of kernel to be used for filtering. It should be a positive integer.
numIter - Number of iterations of the algorithm. It should be a positive integer.
sigmaAlpha - Controls the sharpness of the weight transition from edges to smooth/texture regions, where a bigger value means a sharper transition. When the value is negative, it is automatically calculated.
sigmaAvg - Range blur parameter for texture blurring. A larger value makes the result more blurred. When the value is negative, it is automatically calculated as described in the paper.
See also
rollingGuidanceFilter, bilateralFilter

◆ bilateralTextureFilter() [2/5]

static void OpenCVForUnity.XimgprocModule.Ximgproc.bilateralTextureFilter ( Mat src,
Mat dst,
int fr )
static

Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].

Parameters
src - Source image whose depth is 8-bit UINT or 32-bit FLOAT.
dst - Destination image of the same size and type as src.
fr - Radius of kernel to be used for filtering. It should be a positive integer.
numIter - Number of iterations of the algorithm. It should be a positive integer.
sigmaAlpha - Controls the sharpness of the weight transition from edges to smooth/texture regions, where a bigger value means a sharper transition. When the value is negative, it is automatically calculated.
sigmaAvg - Range blur parameter for texture blurring. A larger value makes the result more blurred. When the value is negative, it is automatically calculated as described in the paper.
See also
rollingGuidanceFilter, bilateralFilter

◆ bilateralTextureFilter() [3/5]

static void OpenCVForUnity.XimgprocModule.Ximgproc.bilateralTextureFilter ( Mat src,
Mat dst,
int fr,
int numIter )
static

Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].

Parameters
src - Source image whose depth is 8-bit UINT or 32-bit FLOAT.
dst - Destination image of the same size and type as src.
fr - Radius of kernel to be used for filtering. It should be a positive integer.
numIter - Number of iterations of the algorithm. It should be a positive integer.
sigmaAlpha - Controls the sharpness of the weight transition from edges to smooth/texture regions, where a bigger value means a sharper transition. When the value is negative, it is automatically calculated.
sigmaAvg - Range blur parameter for texture blurring. A larger value makes the result more blurred. When the value is negative, it is automatically calculated as described in the paper.
See also
rollingGuidanceFilter, bilateralFilter

◆ bilateralTextureFilter() [4/5]

static void OpenCVForUnity.XimgprocModule.Ximgproc.bilateralTextureFilter ( Mat src,
Mat dst,
int fr,
int numIter,
double sigmaAlpha )
static

Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].

Parameters
src - Source image whose depth is 8-bit UINT or 32-bit FLOAT.
dst - Destination image of the same size and type as src.
fr - Radius of kernel to be used for filtering. It should be a positive integer.
numIter - Number of iterations of the algorithm. It should be a positive integer.
sigmaAlpha - Controls the sharpness of the weight transition from edges to smooth/texture regions, where a bigger value means a sharper transition. When the value is negative, it is automatically calculated.
sigmaAvg - Range blur parameter for texture blurring. A larger value makes the result more blurred. When the value is negative, it is automatically calculated as described in the paper.
See also
rollingGuidanceFilter, bilateralFilter

◆ bilateralTextureFilter() [5/5]

static void OpenCVForUnity.XimgprocModule.Ximgproc.bilateralTextureFilter ( Mat src,
Mat dst,
int fr,
int numIter,
double sigmaAlpha,
double sigmaAvg )
static

Applies the bilateral texture filter to an image. It performs a structure-preserving texture filter. For more details about this filter see [Cho2014].

Parameters
src - Source image whose depth is 8-bit UINT or 32-bit FLOAT.
dst - Destination image of the same size and type as src.
fr - Radius of kernel to be used for filtering. It should be a positive integer.
numIter - Number of iterations of the algorithm. It should be a positive integer.
sigmaAlpha - Controls the sharpness of the weight transition from edges to smooth/texture regions, where a bigger value means a sharper transition. When the value is negative, it is automatically calculated.
sigmaAvg - Range blur parameter for texture blurring. A larger value makes the result more blurred. When the value is negative, it is automatically calculated as described in the paper.
See also
rollingGuidanceFilter, bilateralFilter
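
A short C# sketch (added for illustration, not part of the reference) that uses the documented four-argument overload so that sigmaAlpha and sigmaAvg stay at their automatic defaults; the caller is assumed to supply an 8-bit or 32-bit float source Mat.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class BilateralTextureFilterExample
{
    public static Mat Run(Mat src)
    {
        Mat dst = new Mat();
        // fr = 5 (kernel radius), numIter = 3; sigmaAlpha/sigmaAvg are computed
        // automatically because this overload does not pass them explicitly.
        Ximgproc.bilateralTextureFilter(src, dst, 5, 3);
        return dst;
    }
}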

◆ colorMatchTemplate()

static void OpenCVForUnity.XimgprocModule.Ximgproc.colorMatchTemplate ( Mat img,
Mat templ,
Mat result )
static

Compares a color template against overlapped color image regions.

Parameters
img - Image where the search is running. It must be a 3-channel image.
templ - Searched template. It must be not greater than the source image and have 3 channels.
result - Map of comparison results. It must be single-channel, 64-bit floating-point.
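
A hedged C# sketch of how the result map might be consumed (not part of the reference): Core.minMaxLoc from OpenCVForUnity.CoreModule is used to locate the extrema of the CV_64FC1 response; which extremum marks the best match depends on the measure, so both are left available to the caller.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class ColorMatchTemplateExample
{
    public static Core.MinMaxLocResult Match(Mat img, Mat templ)
    {
        // img and templ must both have 3 channels; result comes back as CV_64FC1.
        Mat result = new Mat();
        Ximgproc.colorMatchTemplate(img, templ, result);
        // Inspect the extrema of the response map (minLoc/maxLoc, minVal/maxVal).
        return Core.minMaxLoc(result);
    }
}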

◆ computeBadPixelPercent() [1/6]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeBadPixelPercent ( Mat GT,
Mat src,
in Vec4i ROI )
static

Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
Returns
the percent of "bad" pixels in the disparity map

◆ computeBadPixelPercent() [2/6]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeBadPixelPercent ( Mat GT,
Mat src,
in Vec4i ROI,
int thresh )
static

Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
Returns
the percent of "bad" pixels in the disparity map

◆ computeBadPixelPercent() [3/6]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeBadPixelPercent ( Mat GT,
Mat src,
in(int x, int y, int width, int height) ROI )
static

Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
Returns
the percent of "bad" pixels in the disparity map

◆ computeBadPixelPercent() [4/6]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeBadPixelPercent ( Mat GT,
Mat src,
in(int x, int y, int width, int height) ROI,
int thresh )
static

Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
Returns
the percent of "bad" pixels in the disparity map

◆ computeBadPixelPercent() [5/6]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeBadPixelPercent ( Mat GT,
Mat src,
Rect ROI )
static

Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
Returns
the percent of "bad" pixels in the disparity map

◆ computeBadPixelPercent() [6/6]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeBadPixelPercent ( Mat GT,
Mat src,
Rect ROI,
int thresh )
static

Function for computing the percent of "bad" pixels in the disparity map (pixels where error is higher than a specified threshold)

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
thresh - threshold used to determine "bad" pixels
Returns
the percent of "bad" pixels in the disparity map

◆ computeMSE() [1/3]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeMSE ( Mat GT,
Mat src,
in Vec4i ROI )
static

Function for computing mean square error for disparity maps.

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
Returns
returns mean square error between GT and src

◆ computeMSE() [2/3]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeMSE ( Mat GT,
Mat src,
in(int x, int y, int width, int height) ROI )
static

Function for computing mean square error for disparity maps.

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
Returns
returns mean square error between GT and src

◆ computeMSE() [3/3]

static double OpenCVForUnity.XimgprocModule.Ximgproc.computeMSE ( Mat GT,
Mat src,
Rect ROI )
static

Function for computing mean square error for disparity maps.

Parameters
GT - ground truth disparity map
src - disparity map to evaluate
ROI - region of interest
Returns
returns mean square error between GT and src
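
A sketch combining readGT, computeMSE and computeBadPixelPercent into one evaluation helper (illustrative only): the ground-truth path is hypothetical, readGT is assumed here to return 0 on success, and the threshold of 24 (1.5 pixels in the 16x-scaled convention) is an assumed choice.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class DisparityEvaluationExample
{
    public static void Evaluate(string gtPath, Mat disparity, Rect roi)
    {
        Mat gt = new Mat();
        if (Ximgproc.readGT(gtPath, gt) != 0)
            return; // ground truth could not be read (assumed 0 == success)

        double mse = Ximgproc.computeMSE(gt, disparity, roi);
        double bad = Ximgproc.computeBadPixelPercent(gt, disparity, roi, 24);
        UnityEngine.Debug.Log("MSE: " + mse + ", bad pixels: " + bad + "%");
    }
}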

◆ contourSampling()

static void OpenCVForUnity.XimgprocModule.Ximgproc.contourSampling ( Mat src,
Mat _out,
int nbElt )
static

Contour sampling.

Parameters
src - contour, of type vector<Point>, vector<Point2f> or vector<Point2d>
out - Mat of type CV_64FC2 and nbElt rows
nbElt - number of points in the out contour
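
A small C# sketch (added here, not from the reference) that resamples a MatOfPoint contour, for example one returned by Imgproc.findContours, to a fixed number of points; MatOfPoint from OpenCVForUnity.CoreModule is assumed as the vector<Point> equivalent.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class ContourSamplingExample
{
    public static Mat Resample(MatOfPoint contour, int nbElt)
    {
        // Output has nbElt rows of type CV_64FC2, as documented above.
        Mat sampled = new Mat();
        Ximgproc.contourSampling(contour, sampled, nbElt);
        return sampled;
    }
}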

◆ covarianceEstimation()

static void OpenCVForUnity.XimgprocModule.Ximgproc.covarianceEstimation ( Mat src,
Mat dst,
int windowRows,
int windowCols )
static

Computes the estimated covariance matrix of an image using the sliding window formulation.

Parameters
src - The source image. Input image must be of a complex type.
dst - The destination estimated covariance matrix. Output matrix will be of size (windowRows*windowCols, windowRows*windowCols).
windowRows - The number of rows in the window.
windowCols - The number of cols in the window. The window size parameters control the accuracy of the estimation. The sliding window moves over the entire image from the top-left corner to the bottom-right corner. Each location of the window represents a sample. If the window is the size of the image, then this gives the exact covariance matrix. For all other cases, the sizes of the window will impact the number of samples and the number of elements in the estimated covariance matrix.
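
A sketch of one way to satisfy the complex-type requirement (an assumption about the intended input layout, not taken from the reference): a real CV_32F image is paired with a zero imaginary plane to form a CV_32FC2 Mat before calling covarianceEstimation.

using System.Collections.Generic;
using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class CovarianceEstimationExample
{
    public static Mat Run(Mat gray32f)
    {
        // Build a 2-channel (complex) image: real part = gray32f, imaginary part = 0.
        Mat complexSrc = new Mat();
        Core.merge(new List<Mat> { gray32f, Mat.zeros(gray32f.size(), CvType.CV_32F) }, complexSrc);

        Mat cov = new Mat();
        Ximgproc.covarianceEstimation(complexSrc, cov, 7, 7); // 7x7 sliding window
        return cov; // 49 x 49 estimated covariance matrix
    }
}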

◆ createAMFilter() [1/2]

static AdaptiveManifoldFilter OpenCVForUnity.XimgprocModule.Ximgproc.createAMFilter ( double sigma_s,
double sigma_r )
static

Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.

Parameters
sigma_s - spatial standard deviation.
sigma_r - color space standard deviation; it is similar to the sigma in the color space of bilateralFilter.
adjust_outliers - optional, specifies whether to perform the outlier adjustment operation (Eq. 9 in the original paper).

For more details about Adaptive Manifold Filter parameters, see the original article [Gastal12].

Note
Joint images with CV_8U and CV_16U depth are converted to images with CV_32F depth and [0; 1] color range before processing. Hence the color space sigma sigma_r must be in the [0; 1] range, unlike the same sigmas in the bilateralFilter and dtFilter functions.

◆ createAMFilter() [2/2]

static AdaptiveManifoldFilter OpenCVForUnity.XimgprocModule.Ximgproc.createAMFilter ( double sigma_s,
double sigma_r,
bool adjust_outliers )
static

Factory method, create instance of AdaptiveManifoldFilter and produce some initialization routines.

Parameters
sigma_s - spatial standard deviation.
sigma_r - color space standard deviation; it is similar to the sigma in the color space of bilateralFilter.
adjust_outliers - optional, specifies whether to perform the outlier adjustment operation (Eq. 9 in the original paper).

For more details about Adaptive Manifold Filter parameters, see the original article [Gastal12].

Note
Joint images with CV_8U and CV_16U depth are converted to images with CV_32F depth and [0; 1] color range before processing. Hence the color space sigma sigma_r must be in the [0; 1] range, unlike the same sigmas in the bilateralFilter and dtFilter functions.
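
A sketch showing why the factory exists: one AdaptiveManifoldFilter instance can be reused across images that share the same parameters. The AdaptiveManifoldFilter.filter(src, dst, joint) call is an assumption that the wrapper mirrors the underlying OpenCV class; it is not documented in this section.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class CreateAMFilterExample
{
    public static Mat Run(Mat src, Mat joint)
    {
        AdaptiveManifoldFilter amf = Ximgproc.createAMFilter(16.0, 0.2, false);
        Mat dst = new Mat();
        amf.filter(src, dst, joint); // assumed to mirror cv::ximgproc::AdaptiveManifoldFilter::filter
        return dst;
    }
}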

◆ createContourFitting() [1/3]

static ContourFitting OpenCVForUnity.XimgprocModule.Ximgproc.createContourFitting ( )
static

create ContourFitting algorithm object

Parameters
ctr - number of Fourier descriptors equal to number of contour points after resampling.
fd - Contour defining second shape (Target).

◆ createContourFitting() [2/3]

static ContourFitting OpenCVForUnity.XimgprocModule.Ximgproc.createContourFitting ( int ctr)
static

create ContourFitting algorithm object

Parameters
ctr - number of Fourier descriptors equal to number of contour points after resampling.
fd - Contour defining second shape (Target).

◆ createContourFitting() [3/3]

static ContourFitting OpenCVForUnity.XimgprocModule.Ximgproc.createContourFitting ( int ctr,
int fd )
static

create ContourFitting algorithm object

Parameters
ctr - number of Fourier descriptors equal to number of contour points after resampling.
fd - Contour defining second shape (Target).

◆ createDisparityWLSFilter()

static DisparityWLSFilter OpenCVForUnity.XimgprocModule.Ximgproc.createDisparityWLSFilter ( StereoMatcher matcher_left)
static

Convenience factory method that creates an instance of DisparityWLSFilter and sets up all the relevant filter parameters automatically based on the matcher instance. Currently supports only StereoBM and StereoSGBM.

Parameters
matcher_left - stereo matcher instance that will be used with the filter

◆ createDisparityWLSFilterGeneric()

static DisparityWLSFilter OpenCVForUnity.XimgprocModule.Ximgproc.createDisparityWLSFilterGeneric ( bool use_confidence)
static

More generic factory method, create instance of DisparityWLSFilter and execute basic initialization routines. When using this method you will need to set up the ROI, matchers and other parameters by yourself.

Parameters
use_confidence - filtering with confidence requires two disparity maps (for the left and right views) and is approximately two times slower. However, quality is typically significantly better.
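
A sketch of the confidence-based filtering workflow built from the convenience factories above (illustrative, not from the reference): StereoBM from OpenCVForUnity.Calib3dModule, the matcher parameters, and the DisparityWLSFilter.filter(disparity_left, left_view, filtered, disparity_right) signature are assumptions that the wrapper mirrors the underlying OpenCV API.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.Calib3dModule;
using OpenCVForUnity.XimgprocModule;

public static class DisparityWlsExample
{
    public static Mat Run(Mat leftGray, Mat rightGray)
    {
        // Left matcher plus the matching right-view matcher required for confidence.
        StereoBM leftMatcher = StereoBM.create(64, 9);
        StereoMatcher rightMatcher = Ximgproc.createRightMatcher(leftMatcher);

        Mat leftDisp = new Mat();
        Mat rightDisp = new Mat();
        leftMatcher.compute(leftGray, rightGray, leftDisp);
        rightMatcher.compute(rightGray, leftGray, rightDisp);

        // Filter parameters are set up automatically from the left matcher.
        DisparityWLSFilter wls = Ximgproc.createDisparityWLSFilter(leftMatcher);
        Mat filtered = new Mat();
        wls.filter(leftDisp, leftGray, filtered, rightDisp); // signature assumed, see note above
        return filtered;
    }
}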

◆ createDTFilter() [1/3]

static DTFilter OpenCVForUnity.XimgprocModule.Ximgproc.createDTFilter ( Mat guide,
double sigmaSpatial,
double sigmaColor )
static

Factory method, create instance of DTFilter and produce initialization routines.

Parameters
guide - guided image (used to build transformed distance, which describes edge structure of guided image).
sigmaSpatial - \({\sigma}_H\) parameter in the original article; it is similar to the sigma in the coordinate space of bilateralFilter.
sigmaColor - \({\sigma}_r\) parameter in the original article; it is similar to the sigma in the color space of bilateralFilter.
mode - one of three modes DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for filtering 2D signals in the article.
numIters - optional number of iterations used for filtering, 3 is quite enough.

For more details about Domain Transform filter parameters, see the original article [Gastal11] and Domain Transform filter homepage.

◆ createDTFilter() [2/3]

static DTFilter OpenCVForUnity.XimgprocModule.Ximgproc.createDTFilter ( Mat guide,
double sigmaSpatial,
double sigmaColor,
int mode )
static

Factory method, create instance of DTFilter and produce initialization routines.

Parameters
guide - guided image (used to build transformed distance, which describes edge structure of guided image).
sigmaSpatial - \({\sigma}_H\) parameter in the original article; it is similar to the sigma in the coordinate space of bilateralFilter.
sigmaColor - \({\sigma}_r\) parameter in the original article; it is similar to the sigma in the color space of bilateralFilter.
mode - one of three modes DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for filtering 2D signals in the article.
numIters - optional number of iterations used for filtering, 3 is quite enough.

For more details about Domain Transform filter parameters, see the original article [Gastal11] and Domain Transform filter homepage.

◆ createDTFilter() [3/3]

static DTFilter OpenCVForUnity.XimgprocModule.Ximgproc.createDTFilter ( Mat guide,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters )
static

Factory method, create instance of DTFilter and produce initialization routines.

Parameters
guide - guided image (used to build transformed distance, which describes edge structure of guided image).
sigmaSpatial - \({\sigma}_H\) parameter in the original article; it is similar to the sigma in the coordinate space of bilateralFilter.
sigmaColor - \({\sigma}_r\) parameter in the original article; it is similar to the sigma in the color space of bilateralFilter.
mode - one of three modes DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for filtering 2D signals in the article.
numIters - optional number of iterations used for filtering, 3 is quite enough.

For more details about Domain Transform filter parameters, see the original article [Gastal11] and Domain Transform filter homepage.
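
A sketch contrasting the one-line dtFilter call with reusing a DTFilter instance for several frames that share one guide image (illustrative only); the DTFilter.filter(src, dst) call is assumed to mirror the underlying OpenCV class and is not documented in this section.

using OpenCVForUnity.CoreModule;
using OpenCVForUnity.XimgprocModule;

public static class DtFilterExample
{
    public static void Run(Mat guide, Mat[] frames, Mat[] results)
    {
        // One-off call: fine for a single image.
        Ximgproc.dtFilter(guide, frames[0], results[0], 10.0, 25.0);

        // Several frames, same guide: build the filter once and reuse it.
        DTFilter dtf = Ximgproc.createDTFilter(guide, 10.0, 25.0, Ximgproc.DTF_NC, 3);
        for (int i = 1; i < frames.Length; i++)
        {
            dtf.filter(frames[i], results[i]); // assumed to mirror cv::ximgproc::DTFilter::filter
        }
    }
}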

◆ createEdgeAwareInterpolator()

static EdgeAwareInterpolator OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeAwareInterpolator ( )
static

Factory method that creates an instance of the EdgeAwareInterpolator.

◆ createEdgeBoxes() [1/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [2/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha)
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [3/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [4/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [5/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [6/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [7/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [8/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag,
float edgeMergeThr )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [9/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag,
float edgeMergeThr,
float clusterMinMag )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [10/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag,
float edgeMergeThr,
float clusterMinMag,
float maxAspectRatio )
static

Creates an EdgeBoxes object.

Parameters
alpha - step size of sliding window search.
beta - nms threshold for object proposals.
eta - adaptation rate for nms threshold.
minScore - min score of boxes to detect.
maxBoxes - max number of boxes to detect.
edgeMinMag - edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr - edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag - cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio - max aspect ratio of boxes.
minBoxArea - minimum area of boxes.
gamma - affinity sensitivity.
kappa - scale sensitivity.

◆ createEdgeBoxes() [11/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag,
float edgeMergeThr,
float clusterMinMag,
float maxAspectRatio,
float minBoxArea )
static

Creates an EdgeBoxes object.

Parameters
alpha: step size of sliding window search.
beta: NMS threshold for object proposals.
eta: adaptation rate for NMS threshold.
minScore: min score of boxes to detect.
maxBoxes: max number of boxes to detect.
edgeMinMag: edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr: edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag: cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio: max aspect ratio of boxes.
minBoxArea: minimum area of boxes.
gamma: affinity sensitivity.
kappa: scale sensitivity.

◆ createEdgeBoxes() [12/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag,
float edgeMergeThr,
float clusterMinMag,
float maxAspectRatio,
float minBoxArea,
float gamma )
static

Creates an EdgeBoxes object.

Parameters
alpha: step size of sliding window search.
beta: NMS threshold for object proposals.
eta: adaptation rate for NMS threshold.
minScore: min score of boxes to detect.
maxBoxes: max number of boxes to detect.
edgeMinMag: edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr: edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag: cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio: max aspect ratio of boxes.
minBoxArea: minimum area of boxes.
gamma: affinity sensitivity.
kappa: scale sensitivity.

◆ createEdgeBoxes() [13/13]

static EdgeBoxes OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeBoxes ( float alpha,
float beta,
float eta,
float minScore,
int maxBoxes,
float edgeMinMag,
float edgeMergeThr,
float clusterMinMag,
float maxAspectRatio,
float minBoxArea,
float gamma,
float kappa )
static

Creates an EdgeBoxes object.

Parameters
alpha: step size of sliding window search.
beta: NMS threshold for object proposals.
eta: adaptation rate for NMS threshold.
minScore: min score of boxes to detect.
maxBoxes: max number of boxes to detect.
edgeMinMag: edge min magnitude. Increase to trade off accuracy for speed.
edgeMergeThr: edge merge threshold. Increase to trade off accuracy for speed.
clusterMinMag: cluster min magnitude. Increase to trade off accuracy for speed.
maxAspectRatio: max aspect ratio of boxes.
minBoxArea: minimum area of boxes.
gamma: affinity sensitivity.
kappa: scale sensitivity.
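
A minimal C# usage sketch (not from the library docs): it assumes the usual OpenCV for Unity namespaces, an edge magnitude map and an orientation map already computed elsewhere (e.g. with a structured edge detector), and purely illustrative parameter values; getBoundingBoxes is the EdgeBoxes query method and is assumed to mirror the OpenCV C++ API.

// edges and orientation are assumed to be precomputed CV_32F Mats of the same size.
EdgeBoxes edgeBoxes = Ximgproc.createEdgeBoxes(
    0.65f, 0.75f, 1.0f,      // alpha, beta, eta
    0.01f, 10000,            // minScore, maxBoxes
    0.1f, 0.5f, 0.5f,        // edgeMinMag, edgeMergeThr, clusterMinMag
    3.0f, 1000.0f,           // maxAspectRatio, minBoxArea
    2.0f, 1.5f);             // gamma, kappa
MatOfRect boxes = new MatOfRect();
edgeBoxes.getBoundingBoxes(edges, orientation, boxes);   // object proposal rectangles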

◆ createEdgeDrawing()

static EdgeDrawing OpenCVForUnity.XimgprocModule.Ximgproc.createEdgeDrawing ( )
static

Creates a smart pointer to an EdgeDrawing object and initializes it.

◆ createFastBilateralSolverFilter() [1/4]

static FastBilateralSolverFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastBilateralSolverFilter ( Mat guide,
double sigma_spatial,
double sigma_luma,
double sigma_chroma )
static

Factory method that creates an instance of FastBilateralSolverFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

◆ createFastBilateralSolverFilter() [2/4]

static FastBilateralSolverFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastBilateralSolverFilter ( Mat guide,
double sigma_spatial,
double sigma_luma,
double sigma_chroma,
double lambda )
static

Factory method that creates an instance of FastBilateralSolverFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

◆ createFastBilateralSolverFilter() [3/4]

static FastBilateralSolverFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastBilateralSolverFilter ( Mat guide,
double sigma_spatial,
double sigma_luma,
double sigma_chroma,
double lambda,
int num_iter )
static

Factory method that creates an instance of FastBilateralSolverFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

◆ createFastBilateralSolverFilter() [4/4]

static FastBilateralSolverFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastBilateralSolverFilter ( Mat guide,
double sigma_spatial,
double sigma_luma,
double sigma_chroma,
double lambda,
int num_iter,
double max_tol )
static

Factory method that creates an instance of FastBilateralSolverFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].
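
A minimal C# sketch (assumed usage, not taken from the docs): guide, src and confidence are Mats as described above, the sigma/lambda values are illustrative, and filter(src, confidence, dst) is assumed to mirror the OpenCV FastBilateralSolverFilter API.

FastBilateralSolverFilter fbs = Ximgproc.createFastBilateralSolverFilter(guide, 8.0, 8.0, 8.0, 128.0, 25, 1e-5);
Mat dst = new Mat();
fbs.filter(src, confidence, dst);   // reuse fbs for further images that share the same guide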

◆ createFastGlobalSmootherFilter() [1/3]

static FastGlobalSmootherFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastGlobalSmootherFilter ( Mat guide,
double lambda,
double sigma_color )
static

Factory method that creates an instance of FastGlobalSmootherFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
lambda: parameter defining the amount of regularization.
sigma_color: parameter similar to the color-space sigma in bilateralFilter.
lambda_attenuation: internal parameter defining how much lambda decreases after each iteration. Normally it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter: number of iterations used for filtering; 3 is usually enough.

For more details about Fast Global Smoother parameters, see the original paper [Min2014]. However, please note that there are several differences. Lambda attenuation described in the paper is implemented slightly differently, so do not expect results identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to achieve the same effect. Also, when the source and guide images are the same (image filtering), the authors propose dynamically updating the guide image after each iteration. To maximize performance, this feature was not implemented here.

◆ createFastGlobalSmootherFilter() [2/3]

static FastGlobalSmootherFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastGlobalSmootherFilter ( Mat guide,
double lambda,
double sigma_color,
double lambda_attenuation )
static

Factory method that creates an instance of FastGlobalSmootherFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
lambda: parameter defining the amount of regularization.
sigma_color: parameter similar to the color-space sigma in bilateralFilter.
lambda_attenuation: internal parameter defining how much lambda decreases after each iteration. Normally it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter: number of iterations used for filtering; 3 is usually enough.

For more details about Fast Global Smoother parameters, see the original paper [Min2014]. However, please note that there are several differences. Lambda attenuation described in the paper is implemented slightly differently, so do not expect results identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to achieve the same effect. Also, when the source and guide images are the same (image filtering), the authors propose dynamically updating the guide image after each iteration. To maximize performance, this feature was not implemented here.

◆ createFastGlobalSmootherFilter() [3/3]

static FastGlobalSmootherFilter OpenCVForUnity.XimgprocModule.Ximgproc.createFastGlobalSmootherFilter ( Mat guide,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter )
static

Factory method that creates an instance of FastGlobalSmootherFilter and executes the initialization routines.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
lambda: parameter defining the amount of regularization.
sigma_color: parameter similar to the color-space sigma in bilateralFilter.
lambda_attenuation: internal parameter defining how much lambda decreases after each iteration. Normally it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter: number of iterations used for filtering; 3 is usually enough.

For more details about Fast Global Smoother parameters, see the original paper [Min2014]. However, please note that there are several differences. Lambda attenuation described in the paper is implemented slightly differently, so do not expect results identical to those from the paper; sigma_color values from the paper should be multiplied by 255.0 to achieve the same effect. Also, when the source and guide images are the same (image filtering), the authors propose dynamically updating the guide image after each iteration. To maximize performance, this feature was not implemented here.
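
A minimal C# sketch under the same assumptions (illustrative values; filter(src, dst) is assumed to mirror the OpenCV FastGlobalSmootherFilter API).

FastGlobalSmootherFilter fgs = Ximgproc.createFastGlobalSmootherFilter(guide, 100.0, 5.0, 0.25, 3);
Mat dst = new Mat();
fgs.filter(src, dst);   // reuse fgs for further images that share the same guide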

◆ createFastLineDetector() [1/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( )
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.

◆ createFastLineDetector() [2/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( int length_threshold)
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.

◆ createFastLineDetector() [3/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( int length_threshold,
float distance_threshold )
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.

◆ createFastLineDetector() [4/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( int length_threshold,
float distance_threshold,
double canny_th1 )
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.

◆ createFastLineDetector() [5/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( int length_threshold,
float distance_threshold,
double canny_th1,
double canny_th2 )
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.

◆ createFastLineDetector() [6/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( int length_threshold,
float distance_threshold,
double canny_th1,
double canny_th2,
int canny_aperture_size )
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.

◆ createFastLineDetector() [7/7]

static FastLineDetector OpenCVForUnity.XimgprocModule.Ximgproc.createFastLineDetector ( int length_threshold,
float distance_threshold,
double canny_th1,
double canny_th2,
int canny_aperture_size,
bool do_merge )
static

Creates a smart pointer to a FastLineDetector object and initializes it.

Parameters
length_threshold: Segments shorter than this will be discarded.
distance_threshold: A point placed farther than this from a hypothesis line segment will be regarded as an outlier.
canny_th1: First threshold for the hysteresis procedure in Canny().
canny_th2: Second threshold for the hysteresis procedure in Canny().
canny_aperture_size: Aperture size for the Sobel operator in Canny(). If zero, Canny() is not applied and the input image is taken as an edge image.
do_merge: If true, incremental merging of segments will be performed.
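
A minimal C# sketch (assumptions: gray is an 8-bit single-channel Mat, frame is a color Mat for visualization, and detect/drawSegments take Mat arguments in this wrapper, mirroring the OpenCV FastLineDetector API).

FastLineDetector fld = Ximgproc.createFastLineDetector(10, 1.414f, 50.0, 50.0, 3, false);
Mat lines = new Mat();          // each detected segment is stored as x1, y1, x2, y2
fld.detect(gray, lines);
fld.drawSegments(frame, lines);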

◆ createGraphSegmentation() [1/4]

static GraphSegmentation OpenCVForUnity.XimgprocModule.Ximgproc.createGraphSegmentation ( )
static

Creates a graph-based segmentor.

Parameters
sigma: The sigma parameter, used to smooth the image.
k: The k parameter of the algorithm.
min_size: The minimum size of segments.

◆ createGraphSegmentation() [2/4]

static GraphSegmentation OpenCVForUnity.XimgprocModule.Ximgproc.createGraphSegmentation ( double sigma)
static

Creates a graph-based segmentor.

Parameters
sigma: The sigma parameter, used to smooth the image.
k: The k parameter of the algorithm.
min_size: The minimum size of segments.

◆ createGraphSegmentation() [3/4]

static GraphSegmentation OpenCVForUnity.XimgprocModule.Ximgproc.createGraphSegmentation ( double sigma,
float k )
static

Creates a graph-based segmentor.

Parameters
sigma: The sigma parameter, used to smooth the image.
k: The k parameter of the algorithm.
min_size: The minimum size of segments.

◆ createGraphSegmentation() [4/4]

static GraphSegmentation OpenCVForUnity.XimgprocModule.Ximgproc.createGraphSegmentation ( double sigma,
float k,
int min_size )
static

Creates a graph-based segmentor.

Parameters
sigma: The sigma parameter, used to smooth the image.
k: The k parameter of the algorithm.
min_size: The minimum size of segments.
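
A minimal C# sketch (illustrative parameter values; processImage is the GraphSegmentation method and is assumed to mirror the OpenCV API).

GraphSegmentation gs = Ximgproc.createGraphSegmentation(0.5, 300.0f, 100);
Mat labels = new Mat();       // CV_32S image, one segment id per pixel
gs.processImage(src, labels);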

◆ createGuidedFilter() [1/2]

static GuidedFilter OpenCVForUnity.XimgprocModule.Ximgproc.createGuidedFilter ( Mat guide,
int radius,
double eps )
static

Factory method that creates an instance of GuidedFilter and executes the initialization routines.

Parameters
guide: guided image (or array of images) with up to 3 channels; if it has more than 3 channels, only the first 3 channels will be used.
radius: radius of the Guided Filter.
eps: regularization term of the Guided Filter. \({eps}^2\) is similar to the color-space sigma in bilateralFilter.
scale: subsample factor of the Fast Guided Filter; use a scale less than 1 to speed up computation with almost no visible degradation (e.g. scale==0.5 shrinks the image by 2x inside the filter).

For more details about (Fast) Guided Filter parameters, see the original articles [Kaiming10] [Kaiming15] .

◆ createGuidedFilter() [2/2]

static GuidedFilter OpenCVForUnity.XimgprocModule.Ximgproc.createGuidedFilter ( Mat guide,
int radius,
double eps,
double scale )
static

Factory method that creates an instance of GuidedFilter and executes the initialization routines.

Parameters
guide: guided image (or array of images) with up to 3 channels; if it has more than 3 channels, only the first 3 channels will be used.
radius: radius of the Guided Filter.
eps: regularization term of the Guided Filter. \({eps}^2\) is similar to the color-space sigma in bilateralFilter.
scale: subsample factor of the Fast Guided Filter; use a scale less than 1 to speed up computation with almost no visible degradation (e.g. scale==0.5 shrinks the image by 2x inside the filter).

For more details about (Fast) Guided Filter parameters, see the original articles [Kaiming10] [Kaiming15] .
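
A minimal C# sketch (illustrative radius/eps values; filter(src, dst) is assumed to mirror the OpenCV GuidedFilter API).

GuidedFilter gf = Ximgproc.createGuidedFilter(guide, 8, 100.0);
Mat dst = new Mat();
gf.filter(src, dst);          // filter several images against the same guide without re-initialization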

◆ createQuaternionImage()

static void OpenCVForUnity.XimgprocModule.Ximgproc.createQuaternionImage ( Mat img,
Mat qimg )
static

creates a quaternion image.

Parameters
img: Source 8-bit, 32-bit or 64-bit, 3-channel image.
qimg: Resulting CV_64FC4 quaternion image (4 channels: zero channel and B, G, R).
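
A minimal C# sketch combining this with qconj (both are static methods of this class; bgr is an assumed 3-channel input Mat).

Mat qimg = new Mat();
Ximgproc.createQuaternionImage(bgr, qimg);   // bgr (3 channels) -> CV_64FC4 quaternion image
Mat qc = new Mat();
Ximgproc.qconj(qimg, qc);                    // per-pixel quaternion conjugate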

◆ createRFFeatureGetter()

static RFFeatureGetter OpenCVForUnity.XimgprocModule.Ximgproc.createRFFeatureGetter ( )
static

◆ createRICInterpolator()

static RICInterpolator OpenCVForUnity.XimgprocModule.Ximgproc.createRICInterpolator ( )
static

Factory method that creates an instance of the RICInterpolator.

◆ createRightMatcher()

static StereoMatcher OpenCVForUnity.XimgprocModule.Ximgproc.createRightMatcher ( StereoMatcher matcher_left)
static

Convenience method to set up the matcher for computing the right-view disparity map that is required in case of filtering with confidence.

Parameters
matcher_left: main stereo matcher instance that will be used with the filter.

◆ createScanSegment() [1/3]

static ScanSegment OpenCVForUnity.XimgprocModule.Ximgproc.createScanSegment ( int image_width,
int image_height,
int num_superpixels )
static

Initializes a ScanSegment object.

The function initializes a ScanSegment object for the input image. It stores the parameters of the image: image_width and image_height. It also sets the parameters of the F-DBSCAN superpixel algorithm, which are: num_superpixels, threads, and merge_small.

Parameters
image_width: Image width.
image_height: Image height.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size). Use getNumberOfSuperpixels() to get the actual number.
slices: Number of processing threads for parallelisation. Setting -1 uses the maximum number of threads. In practice, four threads are enough for smaller images and eight threads for larger ones.
merge_small: Merge small segments to give the desired number of superpixels. Processing is much faster without merging, but many small segments will be left in the image.

◆ createScanSegment() [2/3]

static ScanSegment OpenCVForUnity.XimgprocModule.Ximgproc.createScanSegment ( int image_width,
int image_height,
int num_superpixels,
int slices )
static

Initializes a ScanSegment object.

The function initializes a ScanSegment object for the input image. It stores the parameters of the image: image_width and image_height. It also sets the parameters of the F-DBSCAN superpixel algorithm, which are: num_superpixels, threads, and merge_small.

Parameters
image_width: Image width.
image_height: Image height.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size). Use getNumberOfSuperpixels() to get the actual number.
slices: Number of processing threads for parallelisation. Setting -1 uses the maximum number of threads. In practice, four threads are enough for smaller images and eight threads for larger ones.
merge_small: Merge small segments to give the desired number of superpixels. Processing is much faster without merging, but many small segments will be left in the image.

◆ createScanSegment() [3/3]

static ScanSegment OpenCVForUnity.XimgprocModule.Ximgproc.createScanSegment ( int image_width,
int image_height,
int num_superpixels,
int slices,
bool merge_small )
static

Initializes a ScanSegment object.

The function initializes a ScanSegment object for the input image. It stores the parameters of the image: image_width and image_height. It also sets the parameters of the F-DBSCAN superpixel algorithm, which are: num_superpixels, threads, and merge_small.

Parameters
image_width: Image width.
image_height: Image height.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size). Use getNumberOfSuperpixels() to get the actual number.
slices: Number of processing threads for parallelisation. Setting -1 uses the maximum number of threads. In practice, four threads are enough for smaller images and eight threads for larger ones.
merge_small: Merge small segments to give the desired number of superpixels. Processing is much faster without merging, but many small segments will be left in the image.
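
A minimal C# sketch (src is assumed to be a CV_8UC3 Mat matching the initialized size; iterate and getLabels are assumed to mirror the OpenCV ScanSegment API).

ScanSegment ss = Ximgproc.createScanSegment(src.width(), src.height(), 500, -1, true);
ss.iterate(src);
Mat labels = new Mat();
ss.getLabels(labels);         // CV_32S superpixel label per pixel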

◆ createSelectiveSearchSegmentation()

static SelectiveSearchSegmentation OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentation ( )
static

Create a new SelectiveSearchSegmentation class.
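
A minimal C# sketch (bgr is an assumed color input Mat; setBaseImage, switchToSelectiveSearchFast and process are assumed to mirror the OpenCV SelectiveSearchSegmentation API).

SelectiveSearchSegmentation sss = Ximgproc.createSelectiveSearchSegmentation();
sss.setBaseImage(bgr);
sss.switchToSelectiveSearchFast();        // or switchToSelectiveSearchQuality()
MatOfRect proposals = new MatOfRect();
sss.process(proposals);                   // candidate object bounding boxes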

◆ createSelectiveSearchSegmentationStrategyColor()

static SelectiveSearchSegmentationStrategyColor OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyColor ( )
static

Create a new color-based strategy.

◆ createSelectiveSearchSegmentationStrategyFill()

static SelectiveSearchSegmentationStrategyFill OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyFill ( )
static

Create a new fill-based strategy.

◆ createSelectiveSearchSegmentationStrategyMultiple() [1/5]

static SelectiveSearchSegmentationStrategyMultiple OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyMultiple ( )
static

Create a new multiple strategy.

◆ createSelectiveSearchSegmentationStrategyMultiple() [2/5]

static SelectiveSearchSegmentationStrategyMultiple OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyMultiple ( SelectiveSearchSegmentationStrategy s1)
static

Create a new multiple strategy and set one sub-strategy.

Parameters
s1: The first strategy.

◆ createSelectiveSearchSegmentationStrategyMultiple() [3/5]

static SelectiveSearchSegmentationStrategyMultiple OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyMultiple ( SelectiveSearchSegmentationStrategy s1,
SelectiveSearchSegmentationStrategy s2 )
static

Create a new multiple strategy and set two sub-strategies, with equal weights.

Parameters
s1: The first strategy.
s2: The second strategy.

◆ createSelectiveSearchSegmentationStrategyMultiple() [4/5]

static SelectiveSearchSegmentationStrategyMultiple OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyMultiple ( SelectiveSearchSegmentationStrategy s1,
SelectiveSearchSegmentationStrategy s2,
SelectiveSearchSegmentationStrategy s3 )
static

Create a new multiple strategy and set three sub-strategies, with equal weights.

Parameters
s1: The first strategy.
s2: The second strategy.
s3: The third strategy.

◆ createSelectiveSearchSegmentationStrategyMultiple() [5/5]

static SelectiveSearchSegmentationStrategyMultiple OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyMultiple ( SelectiveSearchSegmentationStrategy s1,
SelectiveSearchSegmentationStrategy s2,
SelectiveSearchSegmentationStrategy s3,
SelectiveSearchSegmentationStrategy s4 )
static

Create a new multiple strategy and set four sub-strategies, with equal weights.

Parameters
s1: The first strategy.
s2: The second strategy.
s3: The third strategy.
s4: The fourth strategy.
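
A short C# sketch combining two of the strategy factories documented on this page with equal weights.

SelectiveSearchSegmentationStrategyColor colorStrategy = Ximgproc.createSelectiveSearchSegmentationStrategyColor();
SelectiveSearchSegmentationStrategyTexture textureStrategy = Ximgproc.createSelectiveSearchSegmentationStrategyTexture();
SelectiveSearchSegmentationStrategyMultiple combined =
    Ximgproc.createSelectiveSearchSegmentationStrategyMultiple(colorStrategy, textureStrategy);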

◆ createSelectiveSearchSegmentationStrategySize()

static SelectiveSearchSegmentationStrategySize OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategySize ( )
static

Create a new size-based strategy.

◆ createSelectiveSearchSegmentationStrategyTexture()

static SelectiveSearchSegmentationStrategyTexture OpenCVForUnity.XimgprocModule.Ximgproc.createSelectiveSearchSegmentationStrategyTexture ( )
static

Create a new texture-based strategy.

◆ createStructuredEdgeDetection() [1/2]

static StructuredEdgeDetection OpenCVForUnity.XimgprocModule.Ximgproc.createStructuredEdgeDetection ( string model)
static

◆ createStructuredEdgeDetection() [2/2]

static StructuredEdgeDetection OpenCVForUnity.XimgprocModule.Ximgproc.createStructuredEdgeDetection ( string model,
RFFeatureGetter howToGetFeatures )
static

◆ createSuperpixelLSC() [1/3]

static SuperpixelLSC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelLSC ( Mat image)
static

Class implementing the LSC (Linear Spectral Clustering) superpixels.

Parameters
image: Image to segment.
region_size: Chooses an average superpixel size measured in pixels.
ratio: Chooses the enforcement of the superpixel compactness factor.

The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future computing iterations over the given image. An example of LSC is illustrated in the following picture. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space.

(figure omitted)

◆ createSuperpixelLSC() [2/3]

static SuperpixelLSC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelLSC ( Mat image,
int region_size )
static

Class implementing the LSC (Linear Spectral Clustering) superpixels.

Parameters
image: Image to segment.
region_size: Chooses an average superpixel size measured in pixels.
ratio: Chooses the enforcement of the superpixel compactness factor.

The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future computing iterations over the given image. An example of LSC is illustrated in the following picture. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space.

(figure omitted)

◆ createSuperpixelLSC() [3/3]

static SuperpixelLSC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelLSC ( Mat image,
int region_size,
float ratio )
static

Class implementing the LSC (Linear Spectral Clustering) superpixels.

Parameters
image: Image to segment.
region_size: Chooses an average superpixel size measured in pixels.
ratio: Chooses the enforcement of the superpixel compactness factor.

The function initializes a SuperpixelLSC object for the input image. It sets the parameters of the superpixel algorithm, which are: region_size and ratio. It preallocates some buffers for future computing iterations over the given image. An example of LSC is illustrated in the following picture. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space.

(figure omitted)
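
A minimal C# sketch (lab is an assumed preprocessed input Mat, as recommended above; iterate and getLabelContourMask are assumed to mirror the OpenCV SuperpixelLSC API; values are illustrative).

SuperpixelLSC lsc = Ximgproc.createSuperpixelLSC(lab, 16, 0.075f);
lsc.iterate(10);                          // number of iterations is illustrative
Mat mask = new Mat();
lsc.getLabelContourMask(mask, true);      // superpixel boundaries for visualization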

◆ createSuperpixelSEEDS() [1/4]

static SuperpixelSEEDS OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSEEDS ( int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels )
static

Initializes a SuperpixelSEEDS object.

Parameters
image_width: Image width.
image_height: Image height.
image_channels: Number of channels of the image.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to get the actual number.
num_levels: Number of block levels. The more levels, the more accurate the segmentation, but the more memory and CPU time it needs.
prior: Enables a 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior must be in the range [0, 5].
histogram_bins: Number of histogram bins.
double_step: If true, iterate each block level twice for higher accuracy.

The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and double_step.

The number of levels in num_levels defines the number of block levels that the algorithm uses in the optimization. The initialization is a grid in which the superpixels are equally distributed through the width and the height of the image. The larger blocks correspond to the superpixel size, and the levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels, recursively down to the smallest block level. An example of an initialization with 4 block levels is illustrated in the following figure.

(figure omitted)

◆ createSuperpixelSEEDS() [2/4]

static SuperpixelSEEDS OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSEEDS ( int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels,
int prior )
static

Initializes a SuperpixelSEEDS object.

Parameters
image_width: Image width.
image_height: Image height.
image_channels: Number of channels of the image.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to get the actual number.
num_levels: Number of block levels. The more levels, the more accurate the segmentation, but the more memory and CPU time it needs.
prior: Enables a 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior must be in the range [0, 5].
histogram_bins: Number of histogram bins.
double_step: If true, iterate each block level twice for higher accuracy.

The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and double_step.

The number of levels in num_levels defines the number of block levels that the algorithm uses in the optimization. The initialization is a grid in which the superpixels are equally distributed through the width and the height of the image. The larger blocks correspond to the superpixel size, and the levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels, recursively down to the smallest block level. An example of an initialization with 4 block levels is illustrated in the following figure.

(figure omitted)

◆ createSuperpixelSEEDS() [3/4]

static SuperpixelSEEDS OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSEEDS ( int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels,
int prior,
int histogram_bins )
static

Initializes a SuperpixelSEEDS object.

Parameters
image_width: Image width.
image_height: Image height.
image_channels: Number of channels of the image.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to get the actual number.
num_levels: Number of block levels. The more levels, the more accurate the segmentation, but the more memory and CPU time it needs.
prior: Enables a 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior must be in the range [0, 5].
histogram_bins: Number of histogram bins.
double_step: If true, iterate each block level twice for higher accuracy.

The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and double_step.

The number of levels in num_levels defines the number of block levels that the algorithm uses in the optimization. The initialization is a grid in which the superpixels are equally distributed through the width and the height of the image. The larger blocks correspond to the superpixel size, and the levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels, recursively down to the smallest block level. An example of an initialization with 4 block levels is illustrated in the following figure.

(figure omitted)

◆ createSuperpixelSEEDS() [4/4]

static SuperpixelSEEDS OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSEEDS ( int image_width,
int image_height,
int image_channels,
int num_superpixels,
int num_levels,
int prior,
int histogram_bins,
bool double_step )
static

Initializes a SuperpixelSEEDS object.

Parameters
image_width: Image width.
image_height: Image height.
image_channels: Number of channels of the image.
num_superpixels: Desired number of superpixels. Note that the actual number may be smaller due to restrictions (depending on the image size and num_levels). Use getNumberOfSuperpixels() to get the actual number.
num_levels: Number of block levels. The more levels, the more accurate the segmentation, but the more memory and CPU time it needs.
prior: Enables a 3x3 shape smoothing term if >0. A larger value leads to smoother shapes. prior must be in the range [0, 5].
histogram_bins: Number of histogram bins.
double_step: If true, iterate each block level twice for higher accuracy.

The function initializes a SuperpixelSEEDS object for the input image. It stores the parameters of the image: image_width, image_height and image_channels. It also sets the parameters of the SEEDS superpixel algorithm, which are: num_superpixels, num_levels, use_prior, histogram_bins and double_step.

The number of levels in num_levels defines the number of block levels that the algorithm uses in the optimization. The initialization is a grid in which the superpixels are equally distributed through the width and the height of the image. The larger blocks correspond to the superpixel size, and the levels with smaller blocks are formed by dividing the larger blocks into 2 x 2 blocks of pixels, recursively down to the smallest block level. An example of an initialization with 4 block levels is illustrated in the following figure.

(figure omitted)
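
A minimal C# sketch (img is an assumed input Mat whose size and channel count match the initialization; iterate and getLabels are assumed to mirror the OpenCV SuperpixelSEEDS API).

SuperpixelSEEDS seeds = Ximgproc.createSuperpixelSEEDS(img.width(), img.height(), img.channels(), 400, 4, 2, 5, false);
seeds.iterate(img, 4);                    // run the block-level optimization
Mat labels = new Mat();
seeds.getLabels(labels);                  // CV_32S superpixel index per pixel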

◆ createSuperpixelSLIC() [1/4]

static SuperpixelSLIC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSLIC ( Mat image)
static

Initializes a SuperpixelSLIC object.

Parameters
image: Image to segment.
algorithm: Chooses the algorithm variant to use: SLIC segments the image using a desired region_size; SLICO will additionally optimize using an adaptive compactness factor, while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.
region_size: Chooses an average superpixel size measured in pixels.
ruler: Chooses the enforcement of the superpixel smoothness factor.

The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.

(figure omitted)

◆ createSuperpixelSLIC() [2/4]

static SuperpixelSLIC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSLIC ( Mat image,
int algorithm )
static

Initializes a SuperpixelSLIC object.

Parameters
image: Image to segment.
algorithm: Chooses the algorithm variant to use: SLIC segments the image using a desired region_size; SLICO will additionally optimize using an adaptive compactness factor, while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.
region_size: Chooses an average superpixel size measured in pixels.
ruler: Chooses the enforcement of the superpixel smoothness factor.

The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.

(figure omitted)

◆ createSuperpixelSLIC() [3/4]

static SuperpixelSLIC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSLIC ( Mat image,
int algorithm,
int region_size )
static

Initializes a SuperpixelSLIC object.

Parameters
image: Image to segment.
algorithm: Chooses the algorithm variant to use: SLIC segments the image using a desired region_size; SLICO will additionally optimize using an adaptive compactness factor, while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.
region_size: Chooses an average superpixel size measured in pixels.
ruler: Chooses the enforcement of the superpixel smoothness factor.

The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.

(figure omitted)

◆ createSuperpixelSLIC() [4/4]

static SuperpixelSLIC OpenCVForUnity.XimgprocModule.Ximgproc.createSuperpixelSLIC ( Mat image,
int algorithm,
int region_size,
float ruler )
static

Initializes a SuperpixelSLIC object.

Parameters
image: Image to segment.
algorithm: Chooses the algorithm variant to use: SLIC segments the image using a desired region_size; SLICO will additionally optimize using an adaptive compactness factor, while MSLIC will optimize using manifold methods, resulting in more content-sensitive superpixels.
region_size: Chooses an average superpixel size measured in pixels.
ruler: Chooses the enforcement of the superpixel smoothness factor.

The function initializes a SuperpixelSLIC object for the input image. It sets the parameters of the chosen superpixel algorithm, which are: region_size and ruler. It preallocates some buffers for future computing iterations over the given image. For enhanced results it is recommended for color images to preprocess the image with a little Gaussian blur using a small 3 x 3 kernel and an additional conversion into the CIELab color space. An example of SLIC versus SLICO and MSLIC is illustrated in the following picture.

(figure omitted)
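
A minimal C# sketch (lab is an assumed preprocessed input Mat; the SLICO constant is assumed to be exposed on Ximgproc as in OpenCV, and iterate/getLabelContourMask are assumed to mirror the SuperpixelSLIC API).

SuperpixelSLIC slic = Ximgproc.createSuperpixelSLIC(lab, Ximgproc.SLICO, 20, 10.0f);
slic.iterate(10);
Mat mask = new Mat();
slic.getLabelContourMask(mask, true);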

◆ dtFilter() [1/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.dtFilter ( Mat guide,
Mat src,
Mat dst,
double sigmaSpatial,
double sigmaColor )
static

Simple one-line Domain Transform filter call. If you have multiple images to filter with the same guide image, use the DTFilter interface to avoid extra computations at the initialization stage.

Parameters
guide: guided image (also called joint image) with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
src: filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
dst: destination image.
sigmaSpatial: \({\sigma}_H\) parameter in the original article; it is similar to the coordinate-space sigma in bilateralFilter.
sigmaColor: \({\sigma}_r\) parameter in the original article; it is similar to the color-space sigma in bilateralFilter.
mode: one of three modes, DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for filtering 2D signals in the article.
numIters: optional number of iterations used for filtering; 3 is quite enough.

See also: bilateralFilter, guidedFilter, amFilter

◆ dtFilter() [2/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.dtFilter ( Mat guide,
Mat src,
Mat dst,
double sigmaSpatial,
double sigmaColor,
int mode )
static

Simple one-line Domain Transform filter call. If you have multiple images to filter with the same guide image, use the DTFilter interface to avoid extra computations at the initialization stage.

Parameters
guide: guided image (also called joint image) with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
src: filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
dst: destination image.
sigmaSpatial: \({\sigma}_H\) parameter in the original article; it is similar to the coordinate-space sigma in bilateralFilter.
sigmaColor: \({\sigma}_r\) parameter in the original article; it is similar to the color-space sigma in bilateralFilter.
mode: one of three modes, DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for filtering 2D signals in the article.
numIters: optional number of iterations used for filtering; 3 is quite enough.

See also: bilateralFilter, guidedFilter, amFilter

◆ dtFilter() [3/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.dtFilter ( Mat guide,
Mat src,
Mat dst,
double sigmaSpatial,
double sigmaColor,
int mode,
int numIters )
static

Simple one-line Domain Transform filter call. If you have multiple images to filter with the same guide image, use the DTFilter interface to avoid extra computations at the initialization stage.

Parameters
guide: guided image (also called joint image) with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
src: filtering image with unsigned 8-bit or floating-point 32-bit depth and up to 4 channels.
dst: destination image.
sigmaSpatial: \({\sigma}_H\) parameter in the original article; it is similar to the coordinate-space sigma in bilateralFilter.
sigmaColor: \({\sigma}_r\) parameter in the original article; it is similar to the color-space sigma in bilateralFilter.
mode: one of three modes, DTF_NC, DTF_RF and DTF_IC, which correspond to the three modes for filtering 2D signals in the article.
numIters: optional number of iterations used for filtering; 3 is quite enough.

See also: bilateralFilter, guidedFilter, amFilter
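
A one-line C# sketch of the call documented above (the DTF_NC constant is assumed to be exposed on Ximgproc; sigma values are illustrative).

Mat dst = new Mat();
Ximgproc.dtFilter(guide, src, dst, 10.0, 25.0, Ximgproc.DTF_NC, 3);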

◆ edgePreservingFilter()

static void OpenCVForUnity.XimgprocModule.Ximgproc.edgePreservingFilter ( Mat src,
Mat dst,
int d,
double threshold )
static

Smoothes an image using the Edge-Preserving filter.

The function smoothes Gaussian noise as well as salt & pepper noise. For more details about this implementation, please see [ReiWoe18] Reich, S. and Wörgötter, F. and Dellen, B. (2018). A Real-Time Edge-Preserving Denoising Filter. Proceedings of the 13th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP): Visapp, 85-94, 4. DOI: 10.5220/0006509000850094.

Parameters
src: Source 8-bit 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. Must be greater than or equal to 3.
threshold: Threshold, which distinguishes between noise, outliers, and data.
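
A one-line C# sketch of the call documented above (the d and threshold values are illustrative).

Mat dst = new Mat();
Ximgproc.edgePreservingFilter(src, dst, 9, 20.0);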

◆ fastBilateralSolverFilter() [1/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastBilateralSolverFilter() [2/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst,
double sigma_spatial )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastBilateralSolverFilter() [3/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst,
double sigma_spatial,
double sigma_luma )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastBilateralSolverFilter() [4/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst,
double sigma_spatial,
double sigma_luma,
double sigma_chroma )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastBilateralSolverFilter() [5/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst,
double sigma_spatial,
double sigma_luma,
double sigma_chroma,
double lambda )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastBilateralSolverFilter() [6/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst,
double sigma_spatial,
double sigma_luma,
double sigma_chroma,
double lambda,
int num_iter )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastBilateralSolverFilter() [7/7]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastBilateralSolverFilter ( Mat guide,
Mat src,
Mat confidence,
Mat dst,
double sigma_spatial,
double sigma_luma,
double sigma_chroma,
double lambda,
int num_iter,
double max_tol )
static

Simple one-line Fast Bilateral Solver filter call. If you have multiple images to filter with the same guide, use the FastBilateralSolverFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
confidence: confidence image with unsigned 8-bit or floating-point 32-bit confidence and 1 channel.
dst: destination image.
sigma_spatial: parameter similar to the spatial-space sigma (bandwidth) in bilateralFilter.
sigma_luma: parameter similar to the luma-space sigma (bandwidth) in bilateralFilter.
sigma_chroma: parameter similar to the chroma-space sigma (bandwidth) in bilateralFilter.
lambda: smoothness strength parameter for the solver.
num_iter: number of iterations used by the solver; 25 is usually enough.
max_tol: convergence tolerance used by the solver.

For more details about the Fast Bilateral Solver parameters, see the original paper [BarronPoole2016].

Note
Confidence images with CV_8U depth are expected to be in the [0, 255] range and CV_32F in the [0, 1] range.

◆ fastGlobalSmootherFilter() [1/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastGlobalSmootherFilter ( Mat guide,
Mat src,
Mat dst,
double lambda,
double sigma_color )
static

Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same guide, use the FastGlobalSmootherFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
dst: destination image.
lambda: parameter defining the amount of regularization.
sigma_color: parameter similar to the color-space sigma in bilateralFilter.
lambda_attenuation: internal parameter defining how much lambda decreases after each iteration. Normally it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter: number of iterations used for filtering; 3 is usually enough.

◆ fastGlobalSmootherFilter() [2/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastGlobalSmootherFilter ( Mat guide,
Mat src,
Mat dst,
double lambda,
double sigma_color,
double lambda_attenuation )
static

Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same guide then use FastGlobalSmootherFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
dst: destination image.
lambda: parameter defining the amount of regularization.
sigma_color: parameter similar to the color space sigma in bilateralFilter.
lambda_attenuation: internal parameter defining how much lambda decreases after each iteration. Normally, it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter: number of iterations used for filtering; 3 is usually enough.

◆ fastGlobalSmootherFilter() [3/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fastGlobalSmootherFilter ( Mat guide,
Mat src,
Mat dst,
double lambda,
double sigma_color,
double lambda_attenuation,
int num_iter )
static

Simple one-line Fast Global Smoother filter call. If you have multiple images to filter with the same guide then use FastGlobalSmootherFilter interface to avoid extra computations.

Parameters
guide: image serving as guide for filtering. It should have 8-bit depth and either 1 or 3 channels.
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point 32-bit depth and up to 4 channels.
dst: destination image.
lambda: parameter defining the amount of regularization.
sigma_color: parameter similar to the color space sigma in bilateralFilter.
lambda_attenuation: internal parameter defining how much lambda decreases after each iteration. Normally, it should be 0.25. Setting it to 1.0 may lead to streaking artifacts.
num_iter: number of iterations used for filtering; 3 is usually enough.
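
A short sketch of the full-parameter overload, using src as its own guide; the lambda and sigma_color values are illustrative, not recommendations.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    // src doubles as the guide image here.
    Mat dst = new Mat();
    Ximgproc.fastGlobalSmootherFilter(src, src, dst,
        100.0,  // lambda: regularization strength
        5.0,    // sigma_color
        0.25,   // lambda_attenuation
        3);     // num_iter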

◆ FastHoughTransform() [1/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.FastHoughTransform ( Mat src,
Mat dst,
int dstMatDepth )
static

Calculates 2D Fast Hough transform of an image.

Parameters
dst: The destination image, result of transformation.
src: The source (input) image.
dstMatDepth: The depth of the destination image.
op: The operation to be applied, see cv::HoughOp.
angleRange: The part of Hough space to calculate, see cv::AngleRangeOption.
makeSkew: Specifies whether to apply image skewing, see cv::HoughDeskewOption.

The function calculates the fast Hough transform for full, half or quarter range of angles.

◆ FastHoughTransform() [2/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.FastHoughTransform ( Mat src,
Mat dst,
int dstMatDepth,
int angleRange )
static

Calculates 2D Fast Hough transform of an image.

Parameters
dst: The destination image, result of transformation.
src: The source (input) image.
dstMatDepth: The depth of the destination image.
op: The operation to be applied, see cv::HoughOp.
angleRange: The part of Hough space to calculate, see cv::AngleRangeOption.
makeSkew: Specifies whether to apply image skewing, see cv::HoughDeskewOption.

The function calculates the fast Hough transform for full, half or quarter range of angles.

◆ FastHoughTransform() [3/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.FastHoughTransform ( Mat src,
Mat dst,
int dstMatDepth,
int angleRange,
int op )
static

Calculates 2D Fast Hough transform of an image.

Parameters
dst: The destination image, result of transformation.
src: The source (input) image.
dstMatDepth: The depth of the destination image.
op: The operation to be applied, see cv::HoughOp.
angleRange: The part of Hough space to calculate, see cv::AngleRangeOption.
makeSkew: Specifies whether to apply image skewing, see cv::HoughDeskewOption.

The function calculates the fast Hough transform for full, half or quarter range of angles.

◆ FastHoughTransform() [4/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.FastHoughTransform ( Mat src,
Mat dst,
int dstMatDepth,
int angleRange,
int op,
int makeSkew )
static

Calculates 2D Fast Hough transform of an image.

Parameters
dst: The destination image, result of transformation.
src: The source (input) image.
dstMatDepth: The depth of the destination image.
op: The operation to be applied, see cv::HoughOp.
angleRange: The part of Hough space to calculate, see cv::AngleRangeOption.
makeSkew: Specifies whether to apply image skewing, see cv::HoughDeskewOption.

The function calculates the fast Hough transform for full, half or quarter range of angles.
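
A minimal sketch of the full-parameter overload, assuming an 8-bit binary edge image named edges (for example, the output of Imgproc.Canny); the option constants come from the member data documented below.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat hough = new Mat();
    Ximgproc.FastHoughTransform(edges, hough,
        CvType.CV_32S,          // dstMatDepth
        Ximgproc.ARO_315_135,   // angleRange: half range of angles
        Ximgproc.FHT_ADD,       // op: sum pixel values along each line
        Ximgproc.HDO_DESKEW);   // makeSkew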

◆ findEllipses() [1/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.findEllipses ( Mat image,
Mat ellipses )
static

Finds ellipses rapidly in an image using projective invariant pruning.

The function detects ellipses in images using projective invariant pruning. For more details about this implementation, please see [jia2017fast] Jia, Qi et al, (2017). A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.

Parameters
image: input image, could be gray or color.
ellipses: output vector of found ellipses. Each ellipse is encoded as six floats \(x, y, a, b, radius, score\).
scoreThreshold: float, the threshold of ellipse score.
reliabilityThreshold: float, the threshold of reliability.
centerDistanceThreshold: float, the threshold of center distance.

◆ findEllipses() [2/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.findEllipses ( Mat image,
Mat ellipses,
float scoreThreshold )
static

Finds ellipses rapidly in an image using projective invariant pruning.

The function detects ellipses in images using projective invariant pruning. For more details about this implementation, please see [jia2017fast] Jia, Qi et al, (2017). A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.

Parameters
image: input image, could be gray or color.
ellipses: output vector of found ellipses. Each ellipse is encoded as six floats \(x, y, a, b, radius, score\).
scoreThreshold: float, the threshold of ellipse score.
reliabilityThreshold: float, the threshold of reliability.
centerDistanceThreshold: float, the threshold of center distance.

◆ findEllipses() [3/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.findEllipses ( Mat image,
Mat ellipses,
float scoreThreshold,
float reliabilityThreshold )
static

Finds ellipses rapidly in an image using projective invariant pruning.

The function detects ellipses in images using projective invariant pruning. For more details about this implementation, please see [jia2017fast] Jia, Qi et al, (2017). A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.

Parameters
image: input image, could be gray or color.
ellipses: output vector of found ellipses. Each ellipse is encoded as six floats \(x, y, a, b, radius, score\).
scoreThreshold: float, the threshold of ellipse score.
reliabilityThreshold: float, the threshold of reliability.
centerDistanceThreshold: float, the threshold of center distance.

◆ findEllipses() [4/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.findEllipses ( Mat image,
Mat ellipses,
float scoreThreshold,
float reliabilityThreshold,
float centerDistanceThreshold )
static

Finds ellipses rapidly in an image using projective invariant pruning.

The function detects ellipses in images using projective invariant pruning. For more details about this implementation, please see [jia2017fast] Jia, Qi et al, (2017). A Fast Ellipse Detector using Projective Invariant Pruning. IEEE Transactions on Image Processing.

Parameters
image: input image, could be gray or color.
ellipses: output vector of found ellipses. Each ellipse is encoded as six floats \(x, y, a, b, radius, score\).
scoreThreshold: float, the threshold of ellipse score.
reliabilityThreshold: float, the threshold of reliability.
centerDistanceThreshold: float, the threshold of center distance.
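
A sketch of the full-parameter overload, assuming an input Mat named image; the threshold values are illustrative, not prescriptive.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;
    using UnityEngine;

    Mat ellipses = new Mat();
    Ximgproc.findEllipses(image, ellipses,
        0.7f,    // scoreThreshold
        0.5f,    // reliabilityThreshold
        0.05f);  // centerDistanceThreshold
    // Each row of 'ellipses' encodes one detection (x, y, a, b, radius, score).
    Debug.Log("ellipses found: " + ellipses.rows());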

◆ fourierDescriptor() [1/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fourierDescriptor ( Mat src,
Mat dst )
static

Fourier descriptors for planar closed curves.

For more details about this implementation, please see [PersoonFu1977]

Parameters
src: contour, type vector<Point>, vector<Point2f> or vector<Point2d>.
dst: Mat of type CV_64FC2 with nbElt rows (to be verified).
nbElt: number of rows in dst, or getOptimalDFTSize rows if nbElt=-1.
nbFD: number of Fourier descriptors returned in dst; dst = [FD(1...nbFD/2) FD(nbFD/2-nbElt+1...:nbElt)]

◆ fourierDescriptor() [2/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fourierDescriptor ( Mat src,
Mat dst,
int nbElt )
static

Fourier descriptors for planar closed curves.

For more details about this implementation, please see [PersoonFu1977]

Parameters
src: contour, type vector<Point>, vector<Point2f> or vector<Point2d>.
dst: Mat of type CV_64FC2 with nbElt rows (to be verified).
nbElt: number of rows in dst, or getOptimalDFTSize rows if nbElt=-1.
nbFD: number of Fourier descriptors returned in dst; dst = [FD(1...nbFD/2) FD(nbFD/2-nbElt+1...:nbElt)]

◆ fourierDescriptor() [3/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.fourierDescriptor ( Mat src,
Mat dst,
int nbElt,
int nbFD )
static

Fourier descriptors for planar closed curves.

For more details about this implementation, please see [PersoonFu1977]

Parameters
src: contour, type vector<Point>, vector<Point2f> or vector<Point2d>.
dst: Mat of type CV_64FC2 with nbElt rows (to be verified).
nbElt: number of rows in dst, or getOptimalDFTSize rows if nbElt=-1.
nbFD: number of Fourier descriptors returned in dst; dst = [FD(1...nbFD/2) FD(nbFD/2-nbElt+1...:nbElt)]
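
A brief sketch, assuming contour2f is a closed contour stored as a MatOfPoint2f (for example, one contour from Imgproc.findContours converted to floating point); the number of descriptors kept is illustrative.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    // nbElt = -1 lets the function pick getOptimalDFTSize rows on its own.
    Mat descriptors = new Mat();
    Ximgproc.fourierDescriptor(contour2f, descriptors, -1, 16);
    // 'descriptors' is CV_64FC2; the low-order coefficients describe the coarse shape.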

◆ getDisparityVis() [1/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.getDisparityVis ( Mat src,
Mat dst )
static

Function for creating a disparity map visualization (clamped CV_8U image)

Parameters
src: input disparity map (CV_16S depth)
dst: output visualization
scale: disparity map will be multiplied by this value for visualization

◆ getDisparityVis() [2/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.getDisparityVis ( Mat src,
Mat dst,
double scale )
static

Function for creating a disparity map visualization (clamped CV_8U image)

Parameters
src: input disparity map (CV_16S depth)
dst: output visualization
scale: disparity map will be multiplied by this value for visualization
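
A minimal sketch, assuming a CV_16S disparity map named filteredDisparity (for example, the output of a DisparityWLSFilter); scale = 1.0 keeps the raw values.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat vis = new Mat();
    Ximgproc.getDisparityVis(filteredDisparity, vis, 1.0);
    // 'vis' is a clamped CV_8U image that can be shown directly.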

◆ GradientDericheX()

static void OpenCVForUnity.XimgprocModule.Ximgproc.GradientDericheX ( Mat op,
Mat dst,
double alpha,
double omega )
static

Applies X Deriche filter to an image.

For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf

Parameters
op: Source 8-bit or 16-bit, 1-channel or 3-channel image.
dst: result CV_32FC image with the same number of channels as op.
alpha: double, see paper.
omega: double, see paper.

◆ GradientDericheY()

static void OpenCVForUnity.XimgprocModule.Ximgproc.GradientDericheY ( Mat op,
Mat dst,
double alpha,
double omega )
static

Applies Y Deriche filter to an image.

For more details about this implementation, please see http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.476.5736&rep=rep1&type=pdf

Parameters
op: Source 8-bit or 16-bit, 1-channel or 3-channel image.
dst: result CV_32FC image with the same number of channels as op.
alpha: double, see paper.
omega: double, see paper.
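
A small sketch combining both Deriche directions into a gradient magnitude, assuming an 8-bit single-channel Mat named gray; alpha and omega are illustrative values, see the paper for guidance.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat gx = new Mat(), gy = new Mat(), magnitude = new Mat();
    Ximgproc.GradientDericheX(gray, gx, 1.0, 0.1);
    Ximgproc.GradientDericheY(gray, gy, 1.0, 0.1);
    Core.magnitude(gx, gy, magnitude);  // combine into a single gradient magnitude image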

◆ guidedFilter() [1/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.guidedFilter ( Mat guide,
Mat src,
Mat dst,
int radius,
double eps )
static

Simple one-line (Fast) Guided Filter call.

If you have multiple images to filter with the same guided image then use GuidedFilter interface to avoid extra computations on initialization stage.

Parameters
guide: guided image (or array of images) with up to 3 channels; if it has more than 3 channels, only the first 3 will be used.
src: filtering image with any number of channels.
dst: output image.
radius: radius of Guided Filter.
eps: regularization term of Guided Filter. \({eps}^2\) is similar to the sigma in the color space of bilateralFilter.
dDepth: optional depth of the output image.
scale: subsample factor of Fast Guided Filter; use a scale less than 1 to speed up computation with almost no visible degradation (e.g. scale==0.5 shrinks the image by 2x inside the filter).
See also
bilateralFilter, dtFilter, amFilter

◆ guidedFilter() [2/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.guidedFilter ( Mat guide,
Mat src,
Mat dst,
int radius,
double eps,
int dDepth )
static

Simple one-line (Fast) Guided Filter call.

If you have multiple images to filter with the same guided image then use GuidedFilter interface to avoid extra computations on initialization stage.

Parameters
guide: guided image (or array of images) with up to 3 channels; if it has more than 3 channels, only the first 3 will be used.
src: filtering image with any number of channels.
dst: output image.
radius: radius of Guided Filter.
eps: regularization term of Guided Filter. \({eps}^2\) is similar to the sigma in the color space of bilateralFilter.
dDepth: optional depth of the output image.
scale: subsample factor of Fast Guided Filter; use a scale less than 1 to speed up computation with almost no visible degradation (e.g. scale==0.5 shrinks the image by 2x inside the filter).
See also
bilateralFilter, dtFilter, amFilter

◆ guidedFilter() [3/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.guidedFilter ( Mat guide,
Mat src,
Mat dst,
int radius,
double eps,
int dDepth,
double scale )
static

Simple one-line (Fast) Guided Filter call.

If you have multiple images to filter with the same guided image then use GuidedFilter interface to avoid extra computations on initialization stage.

Parameters
guide: guided image (or array of images) with up to 3 channels; if it has more than 3 channels, only the first 3 will be used.
src: filtering image with any number of channels.
dst: output image.
radius: radius of Guided Filter.
eps: regularization term of Guided Filter. \({eps}^2\) is similar to the sigma in the color space of bilateralFilter.
dDepth: optional depth of the output image.
scale: subsample factor of Fast Guided Filter; use a scale less than 1 to speed up computation with almost no visible degradation (e.g. scale==0.5 shrinks the image by 2x inside the filter).
See also
bilateralFilter, dtFilter, amFilter
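
A one-line usage sketch, assuming an 8-bit Mat named src that is filtered using itself as the guide; the radius, eps, and scale values are illustrative.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat dst = new Mat();
    // dDepth = -1 keeps the source depth; scale = 0.5 enables the fast, subsampled path.
    Ximgproc.guidedFilter(src, src, dst, 8, 0.04 * 255 * 255, -1, 0.5);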

◆ jointBilateralFilter() [1/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.jointBilateralFilter ( Mat joint,
Mat src,
Mat dst,
int d,
double sigmaColor,
double sigmaSpace )
static

Applies the joint bilateral filter to an image.

Parameters
joint: Joint 8-bit or floating-point, 1-channel or 3-channel image.
src: Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
borderType
Note
bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.
See also
bilateralFilter, amFilter

◆ jointBilateralFilter() [2/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.jointBilateralFilter ( Mat joint,
Mat src,
Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int borderType )
static

Applies the joint bilateral filter to an image.

Parameters
joint: Joint 8-bit or floating-point, 1-channel or 3-channel image.
src: Source 8-bit or floating-point, 1-channel or 3-channel image with the same depth as joint image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
borderType
Note
bilateralFilter and jointBilateralFilter use L1 norm to compute difference between colors.
See also
bilateralFilter, amFilter
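
A minimal sketch, assuming joint is a clean guidance image and src is a same-size, same-depth image to be filtered (for example, a noisy aligned frame); the sigma values are illustrative.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat dst = new Mat();
    Ximgproc.jointBilateralFilter(joint, src, dst,
        -1,     // d: computed from sigmaSpace when non-positive
        25.0,   // sigmaColor
        10.0);  // sigmaSpace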

◆ l0Smooth() [1/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.l0Smooth ( Mat src,
Mat dst )
static

Global image smoothing via L0 gradient minimization.

Parameters
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
dst: destination image.
lambda: parameter defining the smooth term weight.
kappa: parameter defining the increasing factor of the weight of the gradient data term.

For more details about L0 Smoother, see the original paper [xu2011image].

◆ l0Smooth() [2/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.l0Smooth ( Mat src,
Mat dst,
double lambda )
static

Global image smoothing via L0 gradient minimization.

Parameters
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
dst: destination image.
lambda: parameter defining the smooth term weight.
kappa: parameter defining the increasing factor of the weight of the gradient data term.

For more details about L0 Smoother, see the original paper [xu2011image].

◆ l0Smooth() [3/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.l0Smooth ( Mat src,
Mat dst,
double lambda,
double kappa )
static

Global image smoothing via L0 gradient minimization.

Parameters
src: source image for filtering with unsigned 8-bit or signed 16-bit or floating-point depth.
dst: destination image.
lambda: parameter defining the smooth term weight.
kappa: parameter defining the increasing factor of the weight of the gradient data term.

For more details about L0 Smoother, see the original paper [xu2011image].
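
A one-line sketch, assuming a Mat named src; the lambda and kappa values shown are commonly used choices and should be treated as illustrative.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat dst = new Mat();
    Ximgproc.l0Smooth(src, dst, 0.02, 2.0);  // lambda, kappa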

◆ niBlackThreshold() [1/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.niBlackThreshold ( Mat _src,
Mat _dst,
double maxValue,
int type,
int blockSize,
double k )
static

Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.

The function transforms a grayscale image to a binary image according to the formulae:

  • THRESH_BINARY

    \[dst(x,y) = \begin{cases} \texttt{maxValue} & \text{if } src(x,y) > T(x,y) \\ 0 & \text{otherwise} \end{cases}\]

  • THRESH_BINARY_INV

    \[dst(x,y) = \begin{cases} 0 & \text{if } src(x,y) > T(x,y) \\ \texttt{maxValue} & \text{otherwise} \end{cases}\]

    where \(T(x,y)\) is a threshold calculated individually for each pixel.

The threshold value \(T(x, y)\) is determined based on the binarization method chosen. For classic Niblack, it is the mean minus \( k \) times standard deviation of \(\texttt{blockSize} \times\texttt{blockSize}\) neighborhood of \((x, y)\).

The function can't process the image in-place.

Parameters
_src: Source 8-bit single-channel image.
_dst: Destination image of the same size and the same type as src.
maxValue: Non-zero value assigned to the pixels for which the condition is satisfied, used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
type: Thresholding type, see cv::ThresholdTypes.
blockSize: Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
k: The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from the mean.
binarizationMethod: Binarization method to use. By default, Niblack's technique is used. Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
r: The user-adjustable parameter used by Sauvola's technique. This is the dynamic range of standard deviation.

See also
threshold, adaptiveThreshold

◆ niBlackThreshold() [2/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.niBlackThreshold ( Mat _src,
Mat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod )
static

Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.

The function transforms a grayscale image to a binary image according to the formulae:

  • THRESH_BINARY

    \[dst(x,y) = \begin{cases} \texttt{maxValue} & \text{if } src(x,y) > T(x,y) \\ 0 & \text{otherwise} \end{cases}\]

  • THRESH_BINARY_INV

    \[dst(x,y) = \begin{cases} 0 & \text{if } src(x,y) > T(x,y) \\ \texttt{maxValue} & \text{otherwise} \end{cases}\]

    where \(T(x,y)\) is a threshold calculated individually for each pixel.

The threshold value \(T(x, y)\) is determined based on the binarization method chosen. For classic Niblack, it is the mean minus \( k \) times standard deviation of \(\texttt{blockSize} \times\texttt{blockSize}\) neighborhood of \((x, y)\).

The function can't process the image in-place.

Parameters
_src: Source 8-bit single-channel image.
_dst: Destination image of the same size and the same type as src.
maxValue: Non-zero value assigned to the pixels for which the condition is satisfied, used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
type: Thresholding type, see cv::ThresholdTypes.
blockSize: Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
k: The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from the mean.
binarizationMethod: Binarization method to use. By default, Niblack's technique is used. Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
r: The user-adjustable parameter used by Sauvola's technique. This is the dynamic range of standard deviation.

See also
threshold, adaptiveThreshold

◆ niBlackThreshold() [3/3]

static void OpenCVForUnity.XimgprocModule.Ximgproc.niBlackThreshold ( Mat _src,
Mat _dst,
double maxValue,
int type,
int blockSize,
double k,
int binarizationMethod,
double r )
static

Performs thresholding on input images using Niblack's technique or some of the popular variations it inspired.

The function transforms a grayscale image to a binary image according to the formulae:

  • THRESH_BINARY

    \[dst(x,y) = \begin{cases} \texttt{maxValue} & \text{if } src(x,y) > T(x,y) \\ 0 & \text{otherwise} \end{cases}\]

  • THRESH_BINARY_INV

    \[dst(x,y) = \begin{cases} 0 & \text{if } src(x,y) > T(x,y) \\ \texttt{maxValue} & \text{otherwise} \end{cases}\]

    where \(T(x,y)\) is a threshold calculated individually for each pixel.

The threshold value \(T(x, y)\) is determined based on the binarization method chosen. For classic Niblack, it is the mean minus \( k \) times standard deviation of \(\texttt{blockSize} \times\texttt{blockSize}\) neighborhood of \((x, y)\).

The function can't process the image in-place.

Parameters
_src: Source 8-bit single-channel image.
_dst: Destination image of the same size and the same type as src.
maxValue: Non-zero value assigned to the pixels for which the condition is satisfied, used with the THRESH_BINARY and THRESH_BINARY_INV thresholding types.
type: Thresholding type, see cv::ThresholdTypes.
blockSize: Size of a pixel neighborhood that is used to calculate a threshold value for the pixel: 3, 5, 7, and so on.
k: The user-adjustable parameter used by Niblack and inspired techniques. For Niblack, this is normally a value between 0 and 1 that is multiplied with the standard deviation and subtracted from the mean.
binarizationMethod: Binarization method to use. By default, Niblack's technique is used. Other techniques can be specified, see cv::ximgproc::LocalBinarizationMethods.
r: The user-adjustable parameter used by Sauvola's technique. This is the dynamic range of standard deviation.

See also
threshold, adaptiveThreshold
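
A sketch of the full-parameter overload using the Sauvola variant, assuming an 8-bit single-channel Mat named gray; blockSize, k, and r are illustrative values.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.ImgprocModule;
    using OpenCVForUnity.XimgprocModule;

    Mat binary = new Mat();
    Ximgproc.niBlackThreshold(gray, binary,
        255, Imgproc.THRESH_BINARY,
        25,      // blockSize: odd neighborhood size
        0.5,     // k
        Ximgproc.BINARIZATION_SAUVOLA,
        128.0);  // r: dynamic range of standard deviation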

◆ PeiLinNormalization()

static void OpenCVForUnity.XimgprocModule.Ximgproc.PeiLinNormalization ( Mat I,
Mat T )
static

This is an overloaded member function, provided for convenience. It differs from the above function only in what argument(s) it accepts.

◆ qconj()

static void OpenCVForUnity.XimgprocModule.Ximgproc.qconj ( Mat qimg,
Mat qcimg )
static

calculates conjugate of a quaternion image.

Parameters
qimg: quaternion image.
qcimg: conjugate of qimg.

◆ qdft()

static void OpenCVForUnity.XimgprocModule.Ximgproc.qdft ( Mat img,
Mat qimg,
int flags,
bool sideLeft )
static

Performs a forward or inverse Discrete quaternion Fourier transform of a 2D quaternion array.

Parameters
img: quaternion image.
qimg: quaternion image in dual space.
flags: operation flags; only the DFT_INVERSE flag is supported.
sideLeft: true if the hypercomplex exponential is to be multiplied on the left (false on the right).

◆ qmultiply()

static void OpenCVForUnity.XimgprocModule.Ximgproc.qmultiply ( Mat src1,
Mat src2,
Mat dst )
static

Calculates the per-element quaternion product of two arrays.

Parameters
src1: quaternion image.
src2: quaternion image.
dst: product dst(I) = src1(I) . src2(I)

◆ qunitary()

static void OpenCVForUnity.XimgprocModule.Ximgproc.qunitary ( Mat qimg,
Mat qnimg )
static

divides each element by its modulus.

Parameters
qimg: quaternion image.
qnimg: normalized (unit-modulus) version of qimg.
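
The quaternion helpers are typically chained together; the sketch below assumes a 3-channel color Mat named color and simply exercises the calls documented in this class (createQuaternionImage is documented elsewhere in the class reference).

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat qimg = new Mat(), qconjImg = new Mat(), qunit = new Mat(), prod = new Mat();
    Ximgproc.createQuaternionImage(color, qimg);  // build the quaternion representation
    Ximgproc.qconj(qimg, qconjImg);               // per-pixel quaternion conjugate
    Ximgproc.qunitary(qimg, qunit);               // divide each quaternion by its modulus
    Ximgproc.qmultiply(qimg, qconjImg, prod);     // per-element quaternion product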

◆ RadonTransform() [1/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.RadonTransform ( Mat src,
Mat dst )
static

Calculate Radon Transform of an image.

Parameters
src: The source (input) image.
dst: The destination image, result of transformation.
theta: Angle resolution of the transform in degrees.
start_angle: Start angle of the transform in degrees.
end_angle: End angle of the transform in degrees.
crop: Crop the source image into a circle.
norm: Normalize the output Mat to grayscale and convert type to CV_8U.

This function calculates the Radon Transform of a given image in any range. See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for details. If the input type is CV_8U, the output will be CV_32S. If the input type is CV_32F or CV_64F, the output will be CV_64F. The output size will be num_of_integral x src_diagonal_length. If crop is selected, the input image will be cropped into a square and then a circle, and the output size will be num_of_integral x min_edge.

◆ RadonTransform() [2/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.RadonTransform ( Mat src,
Mat dst,
double theta )
static

Calculate Radon Transform of an image.

Parameters
src: The source (input) image.
dst: The destination image, result of transformation.
theta: Angle resolution of the transform in degrees.
start_angle: Start angle of the transform in degrees.
end_angle: End angle of the transform in degrees.
crop: Crop the source image into a circle.
norm: Normalize the output Mat to grayscale and convert type to CV_8U.

This function calculates the Radon Transform of a given image in any range. See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for details. If the input type is CV_8U, the output will be CV_32S. If the input type is CV_32F or CV_64F, the output will be CV_64F. The output size will be num_of_integral x src_diagonal_length. If crop is selected, the input image will be cropped into a square and then a circle, and the output size will be num_of_integral x min_edge.

◆ RadonTransform() [3/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.RadonTransform ( Mat src,
Mat dst,
double theta,
double start_angle )
static

Calculate Radon Transform of an image.

Parameters
src: The source (input) image.
dst: The destination image, result of transformation.
theta: Angle resolution of the transform in degrees.
start_angle: Start angle of the transform in degrees.
end_angle: End angle of the transform in degrees.
crop: Crop the source image into a circle.
norm: Normalize the output Mat to grayscale and convert type to CV_8U.

This function calculates the Radon Transform of a given image in any range. See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for details. If the input type is CV_8U, the output will be CV_32S. If the input type is CV_32F or CV_64F, the output will be CV_64F. The output size will be num_of_integral x src_diagonal_length. If crop is selected, the input image will be cropped into a square and then a circle, and the output size will be num_of_integral x min_edge.

◆ RadonTransform() [4/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.RadonTransform ( Mat src,
Mat dst,
double theta,
double start_angle,
double end_angle )
static

Calculate Radon Transform of an image.

Parameters
src: The source (input) image.
dst: The destination image, result of transformation.
theta: Angle resolution of the transform in degrees.
start_angle: Start angle of the transform in degrees.
end_angle: End angle of the transform in degrees.
crop: Crop the source image into a circle.
norm: Normalize the output Mat to grayscale and convert type to CV_8U.

This function calculates the Radon Transform of a given image in any range. See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for details. If the input type is CV_8U, the output will be CV_32S. If the input type is CV_32F or CV_64F, the output will be CV_64F. The output size will be num_of_integral x src_diagonal_length. If crop is selected, the input image will be cropped into a square and then a circle, and the output size will be num_of_integral x min_edge.

◆ RadonTransform() [5/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.RadonTransform ( Mat src,
Mat dst,
double theta,
double start_angle,
double end_angle,
bool crop )
static

Calculate Radon Transform of an image.

Parameters
src: The source (input) image.
dst: The destination image, result of transformation.
theta: Angle resolution of the transform in degrees.
start_angle: Start angle of the transform in degrees.
end_angle: End angle of the transform in degrees.
crop: Crop the source image into a circle.
norm: Normalize the output Mat to grayscale and convert type to CV_8U.

This function calculates the Radon Transform of a given image in any range. See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for details. If the input type is CV_8U, the output will be CV_32S. If the input type is CV_32F or CV_64F, the output will be CV_64F. The output size will be num_of_integral x src_diagonal_length. If crop is selected, the input image will be cropped into a square and then a circle, and the output size will be num_of_integral x min_edge.

◆ RadonTransform() [6/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.RadonTransform ( Mat src,
Mat dst,
double theta,
double start_angle,
double end_angle,
bool crop,
bool norm )
static

Calculate Radon Transform of an image.

Parameters
src: The source (input) image.
dst: The destination image, result of transformation.
theta: Angle resolution of the transform in degrees.
start_angle: Start angle of the transform in degrees.
end_angle: End angle of the transform in degrees.
crop: Crop the source image into a circle.
norm: Normalize the output Mat to grayscale and convert type to CV_8U.

This function calculates the Radon Transform of a given image in any range. See https://engineering.purdue.edu/~malcolm/pct/CTI_Ch03.pdf for details. If the input type is CV_8U, the output will be CV_32S. If the input type is CV_32F or CV_64F, the output will be CV_64F. The output size will be num_of_integral x src_diagonal_length. If crop is selected, the input image will be cropped into a square and then a circle, and the output size will be num_of_integral x min_edge.
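
A minimal sketch of the full-parameter overload, assuming an 8-bit single-channel Mat named gray; a full 0-180 degree sweep at 1 degree resolution, cropped and normalized for display.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat sinogram = new Mat();
    Ximgproc.RadonTransform(gray, sinogram,
        1.0,    // theta: angle resolution in degrees
        0.0,    // start_angle
        180.0,  // end_angle
        true,   // crop to the inscribed circle
        true);  // norm: normalize and convert to CV_8U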

◆ readGT()

static int OpenCVForUnity.XimgprocModule.Ximgproc.readGT ( string src_path,
Mat dst )
static

Function for reading ground truth disparity maps. Supports basic Middlebury and MPI-Sintel formats. Note that the resulting disparity map is scaled by 16.

Parameters
src_path: path to the image containing the ground-truth disparity map
dst: output disparity map, CV_16S depth
Returns
returns zero if the ground truth was read successfully
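
A short sketch combining readGT with computeMSE, assuming a hypothetical path string gtPath and a CV_16S disparity map named disparity produced by a StereoMatcher.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat gt = new Mat();
    if (Ximgproc.readGT(gtPath, gt) == 0)
    {
        // gt is CV_16S and scaled by 16, the same convention used by the evaluation helpers.
        double mse = Ximgproc.computeMSE(gt, disparity, new Rect(0, 0, gt.cols(), gt.rows()));
    }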

◆ rollingGuidanceFilter() [1/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.rollingGuidanceFilter ( Mat src,
Mat dst )
static

Applies the rolling guidance filter to an image.

For more details, please see [zhang2014rolling]

Parameters
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
numOfIter: Number of iterations of joint edge-preserving filtering applied on the source image.
borderType
Note
rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
See also
jointBilateralFilter, bilateralFilter, amFilter

◆ rollingGuidanceFilter() [2/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.rollingGuidanceFilter ( Mat src,
Mat dst,
int d )
static

Applies the rolling guidance filter to an image.

For more details, please see [zhang2014rolling]

Parameters
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
numOfIter: Number of iterations of joint edge-preserving filtering applied on the source image.
borderType
Note
rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
See also
jointBilateralFilter, bilateralFilter, amFilter

◆ rollingGuidanceFilter() [3/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.rollingGuidanceFilter ( Mat src,
Mat dst,
int d,
double sigmaColor )
static

Applies the rolling guidance filter to an image.

For more details, please see [zhang2014rolling]

Parameters
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
numOfIter: Number of iterations of joint edge-preserving filtering applied on the source image.
borderType
Note
rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
See also
jointBilateralFilter, bilateralFilter, amFilter

◆ rollingGuidanceFilter() [4/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.rollingGuidanceFilter ( Mat src,
Mat dst,
int d,
double sigmaColor,
double sigmaSpace )
static

Applies the rolling guidance filter to an image.

For more details, please see [zhang2014rolling]

Parameters
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
numOfIter: Number of iterations of joint edge-preserving filtering applied on the source image.
borderType
Note
rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
See also
jointBilateralFilter, bilateralFilter, amFilter

◆ rollingGuidanceFilter() [5/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.rollingGuidanceFilter ( Mat src,
Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter )
static

Applies the rolling guidance filter to an image.

For more details, please see [zhang2014rolling]

Parameters
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
numOfIter: Number of iterations of joint edge-preserving filtering applied on the source image.
borderType
Note
rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
See also
jointBilateralFilter, bilateralFilter, amFilter

◆ rollingGuidanceFilter() [6/6]

static void OpenCVForUnity.XimgprocModule.Ximgproc.rollingGuidanceFilter ( Mat src,
Mat dst,
int d,
double sigmaColor,
double sigmaSpace,
int numOfIter,
int borderType )
static

Applies the rolling guidance filter to an image.

For more details, please see [zhang2014rolling]

Parameters
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image of the same size and type as src.
d: Diameter of each pixel neighborhood that is used during filtering. If it is non-positive, it is computed from sigmaSpace.
sigmaColor: Filter sigma in the color space. A larger value of the parameter means that farther colors within the pixel neighborhood (see sigmaSpace) will be mixed together, resulting in larger areas of semi-equal color.
sigmaSpace: Filter sigma in the coordinate space. A larger value of the parameter means that farther pixels will influence each other as long as their colors are close enough (see sigmaColor). When d>0, it specifies the neighborhood size regardless of sigmaSpace. Otherwise, d is proportional to sigmaSpace.
numOfIter: Number of iterations of joint edge-preserving filtering applied on the source image.
borderType
Note
rollingGuidanceFilter uses jointBilateralFilter as the edge-preserving filter.
See also
jointBilateralFilter, bilateralFilter, amFilter
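
A sketch of the numOfIter overload, assuming a Mat named src; larger sigmaSpace values remove larger texture structures, and the numbers here are only illustrative.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat dst = new Mat();
    Ximgproc.rollingGuidanceFilter(src, dst,
        -1,    // d: derived from sigmaSpace
        25.0,  // sigmaColor
        3.0,   // sigmaSpace
        4);    // numOfIter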

◆ thinning() [1/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.thinning ( Mat src,
Mat dst )
static

Applies a binary blob thinning operation, to achieve a skeletization of the input image.

The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.

Parameters
src: Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.
dst: Destination image of the same size and the same type as src. The function can work in-place.
thinningType: Value that defines which thinning algorithm should be used. See cv::ximgproc::ThinningTypes.

◆ thinning() [2/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.thinning ( Mat src,
Mat dst,
int thinningType )
static

Applies a binary blob thinning operation, to achieve a skeletization of the input image.

The function transforms a binary blob image into a skeletized form using the technique of Zhang-Suen.

Parameters
src: Source 8-bit single-channel image, containing binary blobs, with blobs having 255 pixel values.
dst: Destination image of the same size and the same type as src. The function can work in-place.
thinningType: Value that defines which thinning algorithm should be used. See cv::ximgproc::ThinningTypes.
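
A small sketch, assuming an 8-bit single-channel Mat named gray; the image is binarized first so that foreground blobs have the 255 values the thinning step expects, and the Guo-Hall variant is chosen purely as an example.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.ImgprocModule;
    using OpenCVForUnity.XimgprocModule;

    Mat binary = new Mat();
    Imgproc.threshold(gray, binary, 0, 255, Imgproc.THRESH_BINARY | Imgproc.THRESH_OTSU);
    Mat skeleton = new Mat();
    Ximgproc.thinning(binary, skeleton, Ximgproc.THINNING_GUOHALL);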

◆ transformFD() [1/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.transformFD ( Mat src,
Mat t,
Mat dst )
static

transform a contour

Parameters
src: contour, or Fourier descriptors if fdContour is true.
t: transform Mat given by estimateTransformation.
dst: Mat of type CV_64FC2 and nbElt rows.
fdContour: true if src contains Fourier descriptors; false if src is a contour.

◆ transformFD() [2/2]

static void OpenCVForUnity.XimgprocModule.Ximgproc.transformFD ( Mat src,
Mat t,
Mat dst,
bool fdContour )
static

transform a contour

Parameters
src: contour, or Fourier descriptors if fdContour is true.
t: transform Mat given by estimateTransformation.
dst: Mat of type CV_64FC2 and nbElt rows.
fdContour: true if src contains Fourier descriptors; false if src is a contour.

◆ weightedMedianFilter() [1/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.weightedMedianFilter ( Mat joint,
Mat src,
Mat dst,
int r )
static

Applies weighted median filter to an image.

For more details about this implementation, please see [zhang2014100+]

Parameters
joint: Joint 8-bit, 1-channel or 3-channel image.
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image.
r: Radius of filtering kernel, should be a positive integer.
sigma: Filter range standard deviation for the joint image.
weightType: The type of weight definition, see WMFWeightType.
mask: A 0-1 mask that has the same size as src. This mask is used to ignore the effect of some pixels. If the pixel value on mask is 0, the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
See also
medianBlur, jointBilateralFilter

◆ weightedMedianFilter() [2/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.weightedMedianFilter ( Mat joint,
Mat src,
Mat dst,
int r,
double sigma )
static

Applies weighted median filter to an image.

For more details about this implementation, please see [zhang2014100+]

Parameters
joint: Joint 8-bit, 1-channel or 3-channel image.
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image.
r: Radius of filtering kernel, should be a positive integer.
sigma: Filter range standard deviation for the joint image.
weightType: The type of weight definition, see WMFWeightType.
mask: A 0-1 mask that has the same size as src. This mask is used to ignore the effect of some pixels. If the pixel value on mask is 0, the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
See also
medianBlur, jointBilateralFilter

◆ weightedMedianFilter() [3/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.weightedMedianFilter ( Mat joint,
Mat src,
Mat dst,
int r,
double sigma,
int weightType )
static

Applies weighted median filter to an image.

For more details about this implementation, please see [zhang2014100+]

Parameters
joint: Joint 8-bit, 1-channel or 3-channel image.
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image.
r: Radius of filtering kernel, should be a positive integer.
sigma: Filter range standard deviation for the joint image.
weightType: The type of weight definition, see WMFWeightType.
mask: A 0-1 mask that has the same size as src. This mask is used to ignore the effect of some pixels. If the pixel value on mask is 0, the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
See also
medianBlur, jointBilateralFilter

◆ weightedMedianFilter() [4/4]

static void OpenCVForUnity.XimgprocModule.Ximgproc.weightedMedianFilter ( Mat joint,
Mat src,
Mat dst,
int r,
double sigma,
int weightType,
Mat mask )
static

Applies weighted median filter to an image.

For more details about this implementation, please see [zhang2014100+]

Parameters
joint: Joint 8-bit, 1-channel or 3-channel image.
src: Source 8-bit or floating-point, 1-channel or 3-channel image.
dst: Destination image.
r: Radius of filtering kernel, should be a positive integer.
sigma: Filter range standard deviation for the joint image.
weightType: The type of weight definition, see WMFWeightType.
mask: A 0-1 mask that has the same size as src. This mask is used to ignore the effect of some pixels. If the pixel value on mask is 0, the pixel will be ignored when maintaining the joint-histogram. This is useful for applications like optical flow occlusion handling.
See also
medianBlur, jointBilateralFilter
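
A minimal sketch of the weightType overload, assuming same-size Mats named joint (8-bit guidance) and src; the radius, sigma, and weight type are illustrative choices.

    using OpenCVForUnity.CoreModule;
    using OpenCVForUnity.XimgprocModule;

    Mat dst = new Mat();
    Ximgproc.weightedMedianFilter(joint, src, dst,
        7,                  // r: kernel radius
        25.5,               // sigma
        Ximgproc.WMF_EXP);  // weightType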

Member Data Documentation

◆ AM_FILTER

const int OpenCVForUnity.XimgprocModule.Ximgproc.AM_FILTER = 4
static

◆ ARO_0_45

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_0_45 = 0
static

◆ ARO_315_0

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_315_0 = 3
static

◆ ARO_315_135

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_315_135 = 6
static

◆ ARO_315_45

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_315_45 = 4
static

◆ ARO_45_135

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_45_135 = 5
static

◆ ARO_45_90

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_45_90 = 1
static

◆ ARO_90_135

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_90_135 = 2
static

◆ ARO_CTR_HOR

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_CTR_HOR = 7
static

◆ ARO_CTR_VER

const int OpenCVForUnity.XimgprocModule.Ximgproc.ARO_CTR_VER = 8
static

◆ BINARIZATION_NIBLACK

const int OpenCVForUnity.XimgprocModule.Ximgproc.BINARIZATION_NIBLACK = 0
static

◆ BINARIZATION_NICK

const int OpenCVForUnity.XimgprocModule.Ximgproc.BINARIZATION_NICK = 3
static

◆ BINARIZATION_SAUVOLA

const int OpenCVForUnity.XimgprocModule.Ximgproc.BINARIZATION_SAUVOLA = 1
static

◆ BINARIZATION_WOLF

const int OpenCVForUnity.XimgprocModule.Ximgproc.BINARIZATION_WOLF = 2
static

◆ DTF_IC

const int OpenCVForUnity.XimgprocModule.Ximgproc.DTF_IC = 1
static

◆ DTF_NC

const int OpenCVForUnity.XimgprocModule.Ximgproc.DTF_NC = 0
static

◆ DTF_RF

const int OpenCVForUnity.XimgprocModule.Ximgproc.DTF_RF = 2
static

◆ FHT_ADD

const int OpenCVForUnity.XimgprocModule.Ximgproc.FHT_ADD = 2
static

◆ FHT_AVE

const int OpenCVForUnity.XimgprocModule.Ximgproc.FHT_AVE = 3
static

◆ FHT_MAX

const int OpenCVForUnity.XimgprocModule.Ximgproc.FHT_MAX = 1
static

◆ FHT_MIN

const int OpenCVForUnity.XimgprocModule.Ximgproc.FHT_MIN = 0
static

◆ GUIDED_FILTER

const int OpenCVForUnity.XimgprocModule.Ximgproc.GUIDED_FILTER = 3
static

◆ HDO_DESKEW

const int OpenCVForUnity.XimgprocModule.Ximgproc.HDO_DESKEW = 1
static

◆ HDO_RAW

const int OpenCVForUnity.XimgprocModule.Ximgproc.HDO_RAW = 0
static

◆ MSLIC

const int OpenCVForUnity.XimgprocModule.Ximgproc.MSLIC = 102
static

◆ SLIC

const int OpenCVForUnity.XimgprocModule.Ximgproc.SLIC = 100
static

◆ SLICO

const int OpenCVForUnity.XimgprocModule.Ximgproc.SLICO = 101
static

◆ THINNING_GUOHALL

const int OpenCVForUnity.XimgprocModule.Ximgproc.THINNING_GUOHALL = 1
static

◆ THINNING_ZHANGSUEN

const int OpenCVForUnity.XimgprocModule.Ximgproc.THINNING_ZHANGSUEN = 0
static

◆ WMF_COS

const int OpenCVForUnity.XimgprocModule.Ximgproc.WMF_COS = 1 << 3
static

◆ WMF_EXP

const int OpenCVForUnity.XimgprocModule.Ximgproc.WMF_EXP = 1
static

◆ WMF_IV1

const int OpenCVForUnity.XimgprocModule.Ximgproc.WMF_IV1 = 1 << 1
static

◆ WMF_IV2

const int OpenCVForUnity.XimgprocModule.Ximgproc.WMF_IV2 = 1 << 2
static

◆ WMF_JAC

const int OpenCVForUnity.XimgprocModule.Ximgproc.WMF_JAC = 1 << 4
static

◆ WMF_OFF

const int OpenCVForUnity.XimgprocModule.Ximgproc.WMF_OFF = 1 << 5
static

The documentation for this class was generated from the following files: