Modifier and Type | Method and Description |
---|---|
static Mat |
opencv_dnn.blobFromImages(MatVector images) |
static Mat |
opencv_dnn.blobFromImages(MatVector images,
double scalefactor,
Size size,
Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from series of images.
|
static void |
opencv_dnn.blobFromImages(MatVector images,
GpuMat blob) |
static void |
opencv_dnn.blobFromImages(MatVector images,
GpuMat blob,
double scalefactor,
Size size,
Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static void |
opencv_dnn.blobFromImages(MatVector images,
Mat blob) |
static void |
opencv_dnn.blobFromImages(MatVector images,
Mat blob,
double scalefactor,
Size size,
Scalar mean,
boolean swapRB,
boolean crop,
int ddepth)
\brief Creates 4-dimensional blob from series of images.
|
static void |
opencv_dnn.blobFromImages(MatVector images,
UMat blob) |
static void |
opencv_dnn.blobFromImages(MatVector images,
UMat blob,
double scalefactor,
Size size,
Scalar mean,
boolean swapRB,
boolean crop,
int ddepth) |
static int |
opencv_video.buildOpticalFlowPyramid(GpuMat img,
MatVector pyramid,
Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(GpuMat img,
MatVector pyramid,
Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static int |
opencv_video.buildOpticalFlowPyramid(Mat img,
MatVector pyramid,
Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(Mat img,
MatVector pyramid,
Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage)
\brief Constructs the image pyramid which can be passed to calcOpticalFlowPyrLK.
|
static int |
opencv_video.buildOpticalFlowPyramid(UMat img,
MatVector pyramid,
Size winSize,
int maxLevel) |
static int |
opencv_video.buildOpticalFlowPyramid(UMat img,
MatVector pyramid,
Size winSize,
int maxLevel,
boolean withDerivatives,
int pyrBorder,
int derivBorder,
boolean tryReuseInputImage) |
static void |
opencv_imgproc.buildPyramid(GpuMat src,
MatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(GpuMat src,
MatVector dst,
int maxlevel,
int borderType) |
static void |
opencv_imgproc.buildPyramid(Mat src,
MatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(Mat src,
MatVector dst,
int maxlevel,
int borderType)
\brief Constructs the Gaussian pyramid for an image.
|
static void |
opencv_imgproc.buildPyramid(UMat src,
MatVector dst,
int maxlevel) |
static void |
opencv_imgproc.buildPyramid(UMat src,
MatVector dst,
int maxlevel,
int borderType) |
static void |
opencv_imgproc.calcBackProject(MatVector images,
IntPointer channels,
GpuMat hist,
GpuMat dst,
FloatPointer ranges,
double scale) |
static void |
opencv_imgproc.calcBackProject(MatVector images,
IntPointer channels,
Mat hist,
Mat dst,
FloatPointer ranges,
double scale)
\overload
|
static void |
opencv_imgproc.calcBackProject(MatVector images,
IntPointer channels,
UMat hist,
UMat dst,
FloatPointer ranges,
double scale) |
static void |
opencv_imgproc.calcHist(MatVector images,
IntPointer channels,
GpuMat mask,
GpuMat hist,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(MatVector images,
IntPointer channels,
GpuMat mask,
GpuMat hist,
IntPointer histSize,
FloatPointer ranges,
boolean accumulate) |
static void |
opencv_imgproc.calcHist(MatVector images,
IntPointer channels,
Mat mask,
Mat hist,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(MatVector images,
IntPointer channels,
Mat mask,
Mat hist,
IntPointer histSize,
FloatPointer ranges,
boolean accumulate)
\overload
|
static void |
opencv_imgproc.calcHist(MatVector images,
IntPointer channels,
UMat mask,
UMat hist,
IntPointer histSize,
FloatPointer ranges) |
static void |
opencv_imgproc.calcHist(MatVector images,
IntPointer channels,
UMat mask,
UMat hist,
IntPointer histSize,
FloatPointer ranges,
boolean accumulate) |
static double |
opencv_calib3d.calibrate(MatVector objectPoints,
MatVector imagePoints,
Size image_size,
GpuMat K,
GpuMat D,
MatVector rvecs,
MatVector tvecs) |
static double |
opencv_calib3d.calibrate(MatVector objectPoints,
MatVector imagePoints,
Size image_size,
GpuMat K,
GpuMat D,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_calib3d.calibrate(MatVector objectPoints,
MatVector imagePoints,
Size image_size,
Mat K,
Mat D,
MatVector rvecs,
MatVector tvecs) |
static double |
opencv_calib3d.calibrate(MatVector objectPoints,
MatVector imagePoints,
Size image_size,
Mat K,
Mat D,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria)
\brief Performs camera calibration
|
static double |
opencv_calib3d.calibrate(MatVector objectPoints,
MatVector imagePoints,
Size image_size,
UMat K,
UMat D,
MatVector rvecs,
MatVector tvecs) |
static double |
opencv_calib3d.calibrate(MatVector objectPoints,
MatVector imagePoints,
Size image_size,
UMat K,
UMat D,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCamera(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs) |
static double |
opencv_calib3d.calibrateCamera(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraAruco(MatVector corners,
GpuMat ids,
GpuMat counter,
Board board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs) |
static double |
opencv_aruco.calibrateCameraAruco(MatVector corners,
GpuMat ids,
GpuMat counter,
Board board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraAruco(MatVector corners,
Mat ids,
Mat counter,
Board board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraAruco(MatVector corners,
Mat ids,
Mat counter,
Board board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria)
\brief It's the same function as #calibrateCameraAruco but without calibration error estimation.
|
static double |
opencv_aruco.calibrateCameraAruco(MatVector corners,
UMat ids,
UMat counter,
Board board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs) |
static double |
opencv_aruco.calibrateCameraAruco(MatVector corners,
UMat ids,
UMat counter,
Board board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraArucoExtended(MatVector corners,
GpuMat ids,
GpuMat counter,
Board board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
GpuMat stdDeviationsIntrinsics,
GpuMat stdDeviationsExtrinsics,
GpuMat perViewErrors) |
static double |
opencv_aruco.calibrateCameraArucoExtended(MatVector corners,
GpuMat ids,
GpuMat counter,
Board board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
GpuMat stdDeviationsIntrinsics,
GpuMat stdDeviationsExtrinsics,
GpuMat perViewErrors,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraArucoExtended(MatVector corners,
Mat ids,
Mat counter,
Board board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraArucoExtended(MatVector corners,
Mat ids,
Mat counter,
Board board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors,
int flags,
TermCriteria criteria)
\brief Calibrate a camera using aruco markers
|
static double |
opencv_aruco.calibrateCameraArucoExtended(MatVector corners,
UMat ids,
UMat counter,
Board board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
UMat stdDeviationsIntrinsics,
UMat stdDeviationsExtrinsics,
UMat perViewErrors) |
static double |
opencv_aruco.calibrateCameraArucoExtended(MatVector corners,
UMat ids,
UMat counter,
Board board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
UMat stdDeviationsIntrinsics,
UMat stdDeviationsExtrinsics,
UMat perViewErrors,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharuco(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs) |
static double |
opencv_aruco.calibrateCameraCharuco(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharuco(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs) |
static double |
opencv_aruco.calibrateCameraCharuco(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria)
\brief It's the same function as #calibrateCameraCharuco but without calibration error estimation.
|
static double |
opencv_aruco.calibrateCameraCharuco(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs) |
static double |
opencv_aruco.calibrateCameraCharuco(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
GpuMat stdDeviationsIntrinsics,
GpuMat stdDeviationsExtrinsics,
GpuMat perViewErrors) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
GpuMat stdDeviationsIntrinsics,
GpuMat stdDeviationsExtrinsics,
GpuMat perViewErrors,
int flags,
TermCriteria criteria) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors,
int flags,
TermCriteria criteria)
\brief Calibrate a camera using Charuco corners
|
static double |
opencv_aruco.calibrateCameraCharucoExtended(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
UMat stdDeviationsIntrinsics,
UMat stdDeviationsExtrinsics,
UMat perViewErrors) |
static double |
opencv_aruco.calibrateCameraCharucoExtended(MatVector charucoCorners,
MatVector charucoIds,
CharucoBoard board,
Size imageSize,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
UMat stdDeviationsIntrinsics,
UMat stdDeviationsExtrinsics,
UMat perViewErrors,
int flags,
TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraExtended(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraExtended(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat perViewErrors,
int flags,
TermCriteria criteria)
\brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration
pattern.
|
static double |
opencv_calib3d.calibrateCameraRO(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat newObjPoints) |
static double |
opencv_calib3d.calibrateCameraRO(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat newObjPoints,
int flags,
TermCriteria criteria) |
static double |
opencv_calib3d.calibrateCameraROExtended(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat newObjPoints,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat stdDeviationsObjPoints,
Mat perViewErrors) |
static double |
opencv_calib3d.calibrateCameraROExtended(Point3fVectorVector objectPoints,
Point2fVectorVector imagePoints,
Size imageSize,
int iFixedPoint,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
Mat newObjPoints,
Mat stdDeviationsIntrinsics,
Mat stdDeviationsExtrinsics,
Mat stdDeviationsObjPoints,
Mat perViewErrors,
int flags,
TermCriteria criteria)
\brief Finds the camera intrinsic and extrinsic parameters from several views of a calibration pattern.
|
static void |
opencv_calib3d.calibrateHandEye(MatVector R_gripper2base,
MatVector t_gripper2base,
MatVector R_target2cam,
MatVector t_target2cam,
GpuMat R_cam2gripper,
GpuMat t_cam2gripper) |
static void |
opencv_calib3d.calibrateHandEye(MatVector R_gripper2base,
MatVector t_gripper2base,
MatVector R_target2cam,
MatVector t_target2cam,
GpuMat R_cam2gripper,
GpuMat t_cam2gripper,
int method) |
static void |
opencv_calib3d.calibrateHandEye(MatVector R_gripper2base,
MatVector t_gripper2base,
MatVector R_target2cam,
MatVector t_target2cam,
Mat R_cam2gripper,
Mat t_cam2gripper) |
static void |
opencv_calib3d.calibrateHandEye(MatVector R_gripper2base,
MatVector t_gripper2base,
MatVector R_target2cam,
MatVector t_target2cam,
Mat R_cam2gripper,
Mat t_cam2gripper,
int method)
\brief Computes Hand-Eye calibration: \( {}^{g}\textrm{T}_{c} \) |
static void |
opencv_calib3d.calibrateHandEye(MatVector R_gripper2base,
MatVector t_gripper2base,
MatVector R_target2cam,
MatVector t_target2cam,
UMat R_cam2gripper,
UMat t_cam2gripper) |
static void |
opencv_calib3d.calibrateHandEye(MatVector R_gripper2base,
MatVector t_gripper2base,
MatVector R_target2cam,
MatVector t_target2cam,
UMat R_cam2gripper,
UMat t_cam2gripper,
int method) |
static boolean |
opencv_stitching.calibrateRotatingCamera(MatVector Hs,
Mat K) |
static void |
opencv_stitching.computeImageFeatures(Feature2D featuresFinder,
MatVector images,
ImageFeatures features) |
static void |
opencv_stitching.computeImageFeatures(Feature2D featuresFinder,
MatVector images,
ImageFeatures features,
MatVector masks)
\brief
|
static void |
opencv_text.computeNMChannels(GpuMat _src,
MatVector _channels) |
static void |
opencv_text.computeNMChannels(GpuMat _src,
MatVector _channels,
int _mode) |
static void |
opencv_text.computeNMChannels(Mat _src,
MatVector _channels) |
static void |
opencv_text.computeNMChannels(Mat _src,
MatVector _channels,
int _mode)
\brief Compute the different channels to be processed independently in the N&M algorithm \cite Neumann12.
|
static void |
opencv_text.computeNMChannels(UMat _src,
MatVector _channels) |
static void |
opencv_text.computeNMChannels(UMat _src,
MatVector _channels,
int _mode) |
static int |
opencv_calib3d.decomposeHomographyMat(GpuMat H,
GpuMat K,
MatVector rotations,
MatVector translations,
MatVector normals) |
static int |
opencv_calib3d.decomposeHomographyMat(Mat H,
Mat K,
MatVector rotations,
MatVector translations,
MatVector normals)
\brief Decompose a homography matrix to rotation(s), translation(s) and plane normal(s).
|
static int |
opencv_calib3d.decomposeHomographyMat(UMat H,
UMat K,
MatVector rotations,
MatVector translations,
MatVector normals) |
static void |
opencv_photo.denoise_TVL1(MatVector observations,
Mat result) |
static void |
opencv_photo.denoise_TVL1(MatVector observations,
Mat result,
double lambda,
int niters)
\brief Primal-dual algorithm is an algorithm for solving special types of variational problems (that is,
finding a function to minimize some functional).
|
static void |
opencv_aruco.detectCharucoDiamond(GpuMat image,
MatVector markerCorners,
GpuMat markerIds,
float squareMarkerLengthRate,
MatVector diamondCorners,
GpuMat diamondIds) |
static void |
opencv_aruco.detectCharucoDiamond(GpuMat image,
MatVector markerCorners,
GpuMat markerIds,
float squareMarkerLengthRate,
MatVector diamondCorners,
GpuMat diamondIds,
GpuMat cameraMatrix,
GpuMat distCoeffs) |
static void |
opencv_aruco.detectCharucoDiamond(Mat image,
MatVector markerCorners,
Mat markerIds,
float squareMarkerLengthRate,
MatVector diamondCorners,
Mat diamondIds) |
static void |
opencv_aruco.detectCharucoDiamond(Mat image,
MatVector markerCorners,
Mat markerIds,
float squareMarkerLengthRate,
MatVector diamondCorners,
Mat diamondIds,
Mat cameraMatrix,
Mat distCoeffs)
\brief Detect ChArUco Diamond markers
|
static void |
opencv_aruco.detectCharucoDiamond(UMat image,
MatVector markerCorners,
UMat markerIds,
float squareMarkerLengthRate,
MatVector diamondCorners,
UMat diamondIds) |
static void |
opencv_aruco.detectCharucoDiamond(UMat image,
MatVector markerCorners,
UMat markerIds,
float squareMarkerLengthRate,
MatVector diamondCorners,
UMat diamondIds,
UMat cameraMatrix,
UMat distCoeffs) |
static void |
opencv_aruco.detectMarkers(GpuMat image,
Dictionary dictionary,
MatVector corners,
GpuMat ids) |
static void |
opencv_aruco.detectMarkers(GpuMat image,
Dictionary dictionary,
MatVector corners,
GpuMat ids,
DetectorParameters parameters,
MatVector rejectedImgPoints,
GpuMat cameraMatrix,
GpuMat distCoeff) |
static void |
opencv_aruco.detectMarkers(Mat image,
Dictionary dictionary,
MatVector corners,
Mat ids) |
static void |
opencv_aruco.detectMarkers(Mat image,
Dictionary dictionary,
MatVector corners,
Mat ids,
DetectorParameters parameters,
MatVector rejectedImgPoints,
Mat cameraMatrix,
Mat distCoeff)
\brief Basic marker detection
|
static void |
opencv_aruco.detectMarkers(UMat image,
Dictionary dictionary,
MatVector corners,
UMat ids) |
static void |
opencv_aruco.detectMarkers(UMat image,
Dictionary dictionary,
MatVector corners,
UMat ids,
DetectorParameters parameters,
MatVector rejectedImgPoints,
UMat cameraMatrix,
UMat distCoeff) |
static void |
opencv_imgproc.drawContours(GpuMat image,
MatVector contours,
int contourIdx,
Scalar color) |
static void |
opencv_imgproc.drawContours(GpuMat image,
MatVector contours,
int contourIdx,
Scalar color,
int thickness,
int lineType,
GpuMat hierarchy,
int maxLevel,
Point offset) |
static void |
opencv_imgproc.drawContours(Mat image,
MatVector contours,
int contourIdx,
Scalar color) |
static void |
opencv_imgproc.drawContours(Mat image,
MatVector contours,
int contourIdx,
Scalar color,
int thickness,
int lineType,
Mat hierarchy,
int maxLevel,
Point offset)
\brief Draws contours outlines or filled contours.
|
static void |
opencv_imgproc.drawContours(UMat image,
MatVector contours,
int contourIdx,
Scalar color) |
static void |
opencv_imgproc.drawContours(UMat image,
MatVector contours,
int contourIdx,
Scalar color,
int thickness,
int lineType,
UMat hierarchy,
int maxLevel,
Point offset) |
static void |
opencv_aruco.drawDetectedDiamonds(GpuMat image,
MatVector diamondCorners) |
static void |
opencv_aruco.drawDetectedDiamonds(GpuMat image,
MatVector diamondCorners,
GpuMat diamondIds,
Scalar borderColor) |
static void |
opencv_aruco.drawDetectedDiamonds(Mat image,
MatVector diamondCorners) |
static void |
opencv_aruco.drawDetectedDiamonds(Mat image,
MatVector diamondCorners,
Mat diamondIds,
Scalar borderColor)
\brief Draw a set of detected ChArUco Diamond markers
|
static void |
opencv_aruco.drawDetectedDiamonds(UMat image,
MatVector diamondCorners) |
static void |
opencv_aruco.drawDetectedDiamonds(UMat image,
MatVector diamondCorners,
UMat diamondIds,
Scalar borderColor) |
static void |
opencv_aruco.drawDetectedMarkers(GpuMat image,
MatVector corners) |
static void |
opencv_aruco.drawDetectedMarkers(GpuMat image,
MatVector corners,
GpuMat ids,
Scalar borderColor) |
static void |
opencv_aruco.drawDetectedMarkers(Mat image,
MatVector corners) |
static void |
opencv_aruco.drawDetectedMarkers(Mat image,
MatVector corners,
Mat ids,
Scalar borderColor)
\brief Draw detected markers in image
|
static void |
opencv_aruco.drawDetectedMarkers(UMat image,
MatVector corners) |
static void |
opencv_aruco.drawDetectedMarkers(UMat image,
MatVector corners,
UMat ids,
Scalar borderColor) |
static void |
opencv_text.erGrouping(GpuMat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects) |
static void |
opencv_text.erGrouping(GpuMat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects,
int method,
BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(GpuMat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_text.erGrouping(Mat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects) |
static void |
opencv_text.erGrouping(Mat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects,
int method,
BytePointer filename,
float minProbablity)
\brief Find groups of Extremal Regions that are organized as text blocks.
|
static void |
opencv_text.erGrouping(Mat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static void |
opencv_text.erGrouping(UMat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects) |
static void |
opencv_text.erGrouping(UMat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects,
int method,
BytePointer filename,
float minProbablity) |
static void |
opencv_text.erGrouping(UMat img,
MatVector channels,
ERStatVectorVector regions,
PointVectorVector groups,
RectVector groups_rects,
int method,
String filename,
float minProbablity) |
static int |
opencv_aruco.estimatePoseBoard(MatVector corners,
GpuMat ids,
Board board,
GpuMat cameraMatrix,
GpuMat distCoeffs,
GpuMat rvec,
GpuMat tvec) |
static int |
opencv_aruco.estimatePoseBoard(MatVector corners,
GpuMat ids,
Board board,
GpuMat cameraMatrix,
GpuMat distCoeffs,
GpuMat rvec,
GpuMat tvec,
boolean useExtrinsicGuess) |
static int |
opencv_aruco.estimatePoseBoard(MatVector corners,
Mat ids,
Board board,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec) |
static int |
opencv_aruco.estimatePoseBoard(MatVector corners,
Mat ids,
Board board,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvec,
Mat tvec,
boolean useExtrinsicGuess)
\brief Pose estimation for a board of markers
|
static int |
opencv_aruco.estimatePoseBoard(MatVector corners,
UMat ids,
Board board,
UMat cameraMatrix,
UMat distCoeffs,
UMat rvec,
UMat tvec) |
static int |
opencv_aruco.estimatePoseBoard(MatVector corners,
UMat ids,
Board board,
UMat cameraMatrix,
UMat distCoeffs,
UMat rvec,
UMat tvec,
boolean useExtrinsicGuess) |
static void |
opencv_aruco.estimatePoseSingleMarkers(MatVector corners,
float markerLength,
GpuMat cameraMatrix,
GpuMat distCoeffs,
GpuMat rvecs,
GpuMat tvecs) |
static void |
opencv_aruco.estimatePoseSingleMarkers(MatVector corners,
float markerLength,
GpuMat cameraMatrix,
GpuMat distCoeffs,
GpuMat rvecs,
GpuMat tvecs,
GpuMat _objPoints) |
static void |
opencv_aruco.estimatePoseSingleMarkers(MatVector corners,
float markerLength,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvecs,
Mat tvecs) |
static void |
opencv_aruco.estimatePoseSingleMarkers(MatVector corners,
float markerLength,
Mat cameraMatrix,
Mat distCoeffs,
Mat rvecs,
Mat tvecs,
Mat _objPoints)
\brief Pose estimation for single markers
|
static void |
opencv_aruco.estimatePoseSingleMarkers(MatVector corners,
float markerLength,
UMat cameraMatrix,
UMat distCoeffs,
UMat rvecs,
UMat tvecs) |
static void |
opencv_aruco.estimatePoseSingleMarkers(MatVector corners,
float markerLength,
UMat cameraMatrix,
UMat distCoeffs,
UMat rvecs,
UMat tvecs,
UMat _objPoints) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(MatVector srcImgs,
GpuMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(MatVector srcImgs,
GpuMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(MatVector srcImgs,
Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(MatVector srcImgs,
Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize)
\brief Modification of fastNlMeansDenoisingMulti function for colored images sequences
|
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(MatVector srcImgs,
UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingColoredMulti(MatVector srcImgs,
UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
float hColor,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
GpuMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
GpuMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
GpuMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
GpuMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize)
\brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
captured in small period of time.
|
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
Mat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType)
\brief Modification of fastNlMeansDenoising function for images sequence where consecutive images have been
captured in small period of time.
|
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
float h,
int templateWindowSize,
int searchWindowSize) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h) |
static void |
opencv_photo.fastNlMeansDenoisingMulti(MatVector srcImgs,
UMat dst,
int imgToDenoiseIndex,
int temporalWindowSize,
FloatPointer h,
int templateWindowSize,
int searchWindowSize,
int normType) |
static void |
opencv_imgproc.fillPoly(GpuMat img,
MatVector pts,
Scalar color) |
static void |
opencv_imgproc.fillPoly(GpuMat img,
MatVector pts,
Scalar color,
int lineType,
int shift,
Point offset) |
static void |
opencv_imgproc.fillPoly(Mat img,
MatVector pts,
Scalar color) |
static void |
opencv_imgproc.fillPoly(Mat img,
MatVector pts,
Scalar color,
int lineType,
int shift,
Point offset)
\brief Fills the area bounded by one or more polygons.
|
static void |
opencv_imgproc.fillPoly(UMat img,
MatVector pts,
Scalar color) |
static void |
opencv_imgproc.fillPoly(UMat img,
MatVector pts,
Scalar color,
int lineType,
int shift,
Point offset) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(MatVector rotations,
MatVector normals,
GpuMat beforePoints,
GpuMat afterPoints,
GpuMat possibleSolutions) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(MatVector rotations,
MatVector normals,
GpuMat beforePoints,
GpuMat afterPoints,
GpuMat possibleSolutions,
GpuMat pointsMask) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(MatVector rotations,
MatVector normals,
Mat beforePoints,
Mat afterPoints,
Mat possibleSolutions) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(MatVector rotations,
MatVector normals,
Mat beforePoints,
Mat afterPoints,
Mat possibleSolutions,
Mat pointsMask)
\brief Filters homography decompositions based on additional information.
|
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(MatVector rotations,
MatVector normals,
UMat beforePoints,
UMat afterPoints,
UMat possibleSolutions) |
static void |
opencv_calib3d.filterHomographyDecompByVisibleRefpoints(MatVector rotations,
MatVector normals,
UMat beforePoints,
UMat afterPoints,
UMat possibleSolutions,
UMat pointsMask) |
static void |
opencv_imgproc.findContours(GpuMat image,
MatVector contours,
GpuMat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(GpuMat image,
MatVector contours,
GpuMat hierarchy,
int mode,
int method,
Point offset) |
static void |
opencv_imgproc.findContours(GpuMat image,
MatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(GpuMat image,
MatVector contours,
int mode,
int method,
Point offset) |
static void |
opencv_imgproc.findContours(Mat image,
MatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(Mat image,
MatVector contours,
int mode,
int method,
Point offset)
\overload
|
static void |
opencv_imgproc.findContours(Mat image,
MatVector contours,
Mat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(Mat image,
MatVector contours,
Mat hierarchy,
int mode,
int method,
Point offset)
\brief Finds contours in a binary image.
|
static void |
opencv_imgproc.findContours(UMat image,
MatVector contours,
int mode,
int method) |
static void |
opencv_imgproc.findContours(UMat image,
MatVector contours,
int mode,
int method,
Point offset) |
static void |
opencv_imgproc.findContours(UMat image,
MatVector contours,
UMat hierarchy,
int mode,
int method) |
static void |
opencv_imgproc.findContours(UMat image,
MatVector contours,
UMat hierarchy,
int mode,
int method,
Point offset) |
static void |
opencv_aruco.getBoardObjectAndImagePoints(Board board,
MatVector detectedCorners,
GpuMat detectedIds,
GpuMat objPoints,
GpuMat imgPoints) |
static void |
opencv_aruco.getBoardObjectAndImagePoints(Board board,
MatVector detectedCorners,
Mat detectedIds,
Mat objPoints,
Mat imgPoints)
\brief Given a board configuration and a set of detected markers, returns the corresponding
image points and object points to call solvePnP
|
static void |
opencv_aruco.getBoardObjectAndImagePoints(Board board,
MatVector detectedCorners,
UMat detectedIds,
UMat objPoints,
UMat imgPoints) |
static Mat |
opencv_videostab.getMotion(int from,
int to,
MatVector motions)
\brief Computes motion between two frames assuming that all the intermediate motions are known.
|
static void |
opencv_core.hconcat(MatVector src,
GpuMat dst) |
static void |
opencv_core.hconcat(MatVector src,
Mat dst)
\overload
|
static void |
opencv_core.hconcat(MatVector src,
UMat dst) |
static void |
opencv_dnn.imagesFromBlob(Mat blob_,
MatVector images_)
\brief Parse a 4D blob and output the images it contains as 2D arrays through a simpler data structure
(std::vector&lt;cv::Mat&gt;).
|
static boolean |
opencv_imgcodecs.imreadmulti(BytePointer filename,
MatVector mats) |
static boolean |
opencv_imgcodecs.imreadmulti(BytePointer filename,
MatVector mats,
int flags)
\brief Loads a multi-page image from a file.
|
static boolean |
opencv_imgcodecs.imreadmulti(String filename,
MatVector mats) |
static boolean |
opencv_imgcodecs.imreadmulti(String filename,
MatVector mats,
int flags) |
static int |
opencv_aruco.interpolateCornersCharuco(MatVector markerCorners,
GpuMat markerIds,
GpuMat image,
CharucoBoard board,
GpuMat charucoCorners,
GpuMat charucoIds) |
static int |
opencv_aruco.interpolateCornersCharuco(MatVector markerCorners,
GpuMat markerIds,
GpuMat image,
CharucoBoard board,
GpuMat charucoCorners,
GpuMat charucoIds,
GpuMat cameraMatrix,
GpuMat distCoeffs,
int minMarkers) |
static int |
opencv_aruco.interpolateCornersCharuco(MatVector markerCorners,
Mat markerIds,
Mat image,
CharucoBoard board,
Mat charucoCorners,
Mat charucoIds) |
static int |
opencv_aruco.interpolateCornersCharuco(MatVector markerCorners,
Mat markerIds,
Mat image,
CharucoBoard board,
Mat charucoCorners,
Mat charucoIds,
Mat cameraMatrix,
Mat distCoeffs,
int minMarkers)
\brief Interpolate position of ChArUco board corners
|
static int |
opencv_aruco.interpolateCornersCharuco(MatVector markerCorners,
UMat markerIds,
UMat image,
CharucoBoard board,
UMat charucoCorners,
UMat charucoIds) |
static int |
opencv_aruco.interpolateCornersCharuco(MatVector markerCorners,
UMat markerIds,
UMat image,
CharucoBoard board,
UMat charucoCorners,
UMat charucoIds,
UMat cameraMatrix,
UMat distCoeffs,
int minMarkers) |
static void |
opencv_core.merge(MatVector mv,
GpuMat dst) |
static void |
opencv_core.merge(MatVector mv,
Mat dst)
\overload
|
static void |
opencv_core.merge(MatVector mv,
UMat dst) |
static void |
opencv_core.mixChannels(MatVector src,
MatVector dst,
IntPointer fromTo)
\overload
|
static void |
opencv_core.mixChannels(MatVector src,
MatVector dst,
IntPointer fromTo,
long npairs)
\overload
|
static void |
opencv_imgproc.polylines(GpuMat img,
MatVector pts,
boolean isClosed,
Scalar color) |
static void |
opencv_imgproc.polylines(GpuMat img,
MatVector pts,
boolean isClosed,
Scalar color,
int thickness,
int lineType,
int shift) |
static void |
opencv_imgproc.polylines(Mat img,
MatVector pts,
boolean isClosed,
Scalar color) |
static void |
opencv_imgproc.polylines(Mat img,
MatVector pts,
boolean isClosed,
Scalar color,
int thickness,
int lineType,
int shift)
\brief Draws several polygonal curves.
|
static void |
opencv_imgproc.polylines(UMat img,
MatVector pts,
boolean isClosed,
Scalar color) |
static void |
opencv_imgproc.polylines(UMat img,
MatVector pts,
boolean isClosed,
Scalar color,
int thickness,
int lineType,
int shift) |
static float |
opencv_calib3d.rectify3Collinear(GpuMat cameraMatrix1,
GpuMat distCoeffs1,
GpuMat cameraMatrix2,
GpuMat distCoeffs2,
GpuMat cameraMatrix3,
GpuMat distCoeffs3,
MatVector imgpt1,
MatVector imgpt3,
Size imageSize,
GpuMat R12,
GpuMat T12,
GpuMat R13,
GpuMat T13,
GpuMat R1,
GpuMat R2,
GpuMat R3,
GpuMat P1,
GpuMat P2,
GpuMat P3,
GpuMat Q,
double alpha,
Size newImgSize,
Rect roi1,
Rect roi2,
int flags) |
static float |
opencv_calib3d.rectify3Collinear(Mat cameraMatrix1,
Mat distCoeffs1,
Mat cameraMatrix2,
Mat distCoeffs2,
Mat cameraMatrix3,
Mat distCoeffs3,
MatVector imgpt1,
MatVector imgpt3,
Size imageSize,
Mat R12,
Mat T12,
Mat R13,
Mat T13,
Mat R1,
Mat R2,
Mat R3,
Mat P1,
Mat P2,
Mat P3,
Mat Q,
double alpha,
Size newImgSize,
Rect roi1,
Rect roi2,
int flags)
computes the rectification transformations for 3-head camera, where all the heads are on the same line.
|
static float |
opencv_calib3d.rectify3Collinear(UMat cameraMatrix1,
UMat distCoeffs1,
UMat cameraMatrix2,
UMat distCoeffs2,
UMat cameraMatrix3,
UMat distCoeffs3,
MatVector imgpt1,
MatVector imgpt3,
Size imageSize,
UMat R12,
UMat T12,
UMat R13,
UMat T13,
UMat R1,
UMat R2,
UMat R3,
UMat P1,
UMat P2,
UMat P3,
UMat Q,
double alpha,
Size newImgSize,
Rect roi1,
Rect roi2,
int flags) |
static void |
opencv_aruco.refineDetectedMarkers(GpuMat image,
Board board,
MatVector detectedCorners,
GpuMat detectedIds,
MatVector rejectedCorners) |
static void |
opencv_aruco.refineDetectedMarkers(GpuMat image,
Board board,
MatVector detectedCorners,
GpuMat detectedIds,
MatVector rejectedCorners,
GpuMat cameraMatrix,
GpuMat distCoeffs,
float minRepDistance,
float errorCorrectionRate,
boolean checkAllOrders,
GpuMat recoveredIdxs,
DetectorParameters parameters) |
static void |
opencv_aruco.refineDetectedMarkers(Mat image,
Board board,
MatVector detectedCorners,
Mat detectedIds,
MatVector rejectedCorners) |
static void |
opencv_aruco.refineDetectedMarkers(Mat image,
Board board,
MatVector detectedCorners,
Mat detectedIds,
MatVector rejectedCorners,
Mat cameraMatrix,
Mat distCoeffs,
float minRepDistance,
float errorCorrectionRate,
boolean checkAllOrders,
Mat recoveredIdxs,
DetectorParameters parameters)
\brief Refine not-detected markers based on the already detected markers and the board layout
|
static void |
opencv_aruco.refineDetectedMarkers(UMat image,
Board board,
MatVector detectedCorners,
UMat detectedIds,
MatVector rejectedCorners) |
static void |
opencv_aruco.refineDetectedMarkers(UMat image,
Board board,
MatVector detectedCorners,
UMat detectedIds,
MatVector rejectedCorners,
UMat cameraMatrix,
UMat distCoeffs,
float minRepDistance,
float errorCorrectionRate,
boolean checkAllOrders,
UMat recoveredIdxs,
DetectorParameters parameters) |
static int |
opencv_calib3d.solveP3P(GpuMat objectPoints,
GpuMat imagePoints,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags) |
static int |
opencv_calib3d.solveP3P(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags)
\brief Finds an object pose from 3 3D-2D point correspondences.
|
static int |
opencv_calib3d.solveP3P(UMat objectPoints,
UMat imagePoints,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
int flags) |
static int |
opencv_calib3d.solvePnPGeneric(GpuMat objectPoints,
GpuMat imagePoints,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs) |
static int |
opencv_calib3d.solvePnPGeneric(GpuMat objectPoints,
GpuMat imagePoints,
GpuMat cameraMatrix,
GpuMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
boolean useExtrinsicGuess,
int flags,
GpuMat rvec,
GpuMat tvec,
GpuMat reprojectionError) |
static int |
opencv_calib3d.solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs) |
static int |
opencv_calib3d.solvePnPGeneric(Mat objectPoints,
Mat imagePoints,
Mat cameraMatrix,
Mat distCoeffs,
MatVector rvecs,
MatVector tvecs,
boolean useExtrinsicGuess,
int flags,
Mat rvec,
Mat tvec,
Mat reprojectionError)
\brief Finds an object pose from 3D-2D point correspondences.
|
static int |
opencv_calib3d.solvePnPGeneric(UMat objectPoints,
UMat imagePoints,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs) |
static int |
opencv_calib3d.solvePnPGeneric(UMat objectPoints,
UMat imagePoints,
UMat cameraMatrix,
UMat distCoeffs,
MatVector rvecs,
MatVector tvecs,
boolean useExtrinsicGuess,
int flags,
UMat rvec,
UMat tvec,
UMat reprojectionError) |
static void |
opencv_core.split(GpuMat m,
MatVector mv) |
static void |
opencv_core.split(Mat m,
MatVector mv)
\overload
|
static void |
opencv_core.split(UMat m,
MatVector mv) |
static void |
opencv_core.vconcat(MatVector src,
GpuMat dst) |
static void |
opencv_core.vconcat(MatVector src,
Mat dst)
\overload
|
static void |
opencv_core.vconcat(MatVector src,
UMat dst) |
static void |
opencv_stitching.waveCorrect(MatVector rmats,
int kind)
\brief Tries to make panorama more horizontal (or vertical).
|
Modifier and Type | Method and Description |
---|---|
static Board |
Board.create(MatVector objPoints,
Dictionary dictionary,
GpuMat ids) |
static Board |
Board.create(MatVector objPoints,
Dictionary dictionary,
Mat ids)
\brief Provide way to create Board by passing necessary data.
|
static Board |
Board.create(MatVector objPoints,
Dictionary dictionary,
UMat ids) |
Modifier and Type | Method and Description |
---|---|
MatVector[] |
MatVectorVector.get() |
MatVector |
MatVectorVector.Iterator.get() |
MatVector |
MatVectorVector.get(long i) |
MatVector |
MatVectorVector.pop_back() |
MatVector |
MatVector.push_back(Mat value) |
MatVector |
MatVector.put(long i,
Mat value) |
MatVector |
MatVector.put(Mat... array) |
MatVector |
MatVector.put(Mat value) |
MatVector |
MatVector.put(MatVector x) |
Modifier and Type | Method and Description |
---|---|
void |
LDA.compute(MatVector src,
GpuMat labels) |
void |
LDA.compute(MatVector src,
Mat labels)
Compute the discriminants for data in src (row aligned) and labels.
|
void |
LDA.compute(MatVector src,
UMat labels) |
MatVectorVector.Iterator |
MatVectorVector.insert(MatVectorVector.Iterator pos,
MatVector value) |
MatVectorVector |
MatVectorVector.push_back(MatVector value) |
MatVectorVector |
MatVectorVector.put(long i,
MatVector value) |
MatVectorVector |
MatVectorVector.put(MatVector... array) |
MatVectorVector |
MatVectorVector.put(MatVector value) |
MatVector |
MatVector.put(MatVector x) |
Constructor and Description |
---|
LDA(MatVector src,
GpuMat labels) |
LDA(MatVector src,
GpuMat labels,
int num_components) |
LDA(MatVector src,
Mat labels) |
LDA(MatVector src,
Mat labels,
int num_components)
Initializes and performs a Discriminant Analysis with Fisher's
Optimization Criterion on given data in src and corresponding labels
in labels.
|
LDA(MatVector src,
UMat labels) |
LDA(MatVector src,
UMat labels,
int num_components) |
MatVectorVector(MatVector... array) |
MatVectorVector(MatVector value) |
Modifier and Type | Method and Description |
---|---|
MatVector |
Layer.blobs()
List of learned parameters must be stored here to allow read them by using Net::getParam().
|
MatVector |
LayerParams.blobs()
List of learned parameters stored as blobs.
|
MatVector |
Layer.finalize(MatVector inputs)
Deprecated.
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
|
Modifier and Type | Method and Description |
---|---|
void |
Layer.applyHalideScheduler(BackendNode node,
MatPointerVector inputs,
MatVector outputs,
int targetId)
\brief Automatic Halide scheduling based on layer hyper-parameters.
|
Layer |
Layer.blobs(MatVector setter) |
LayerParams |
LayerParams.blobs(MatVector setter) |
void |
Layer.finalize(MatPointerVector input,
MatVector output)
Deprecated.
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
|
MatVector |
Layer.finalize(MatVector inputs)
Deprecated.
Use Layer::finalize(InputArrayOfArrays, OutputArrayOfArrays) instead
|
void |
Layer.finalize(MatVector inputs,
MatVector outputs)
\brief Computes and sets internal parameters according to inputs, outputs and blobs.
|
void |
Layer.forward_fallback(MatVector inputs,
MatVector outputs,
MatVector internals)
\brief Given the \p input blobs, computes the output \p blobs.
|
void |
Layer.forward(MatPointerVector input,
MatVector output,
MatVector internals)
Deprecated.
Use Layer::forward(InputArrayOfArrays, OutputArrayOfArrays, OutputArrayOfArrays) instead
|
void |
Net.forward(MatVector outputBlobs) |
void |
Net.forward(MatVector outputBlobs,
BytePointer outputName)
\brief Runs forward pass to compute output of layer with name \p outputName.
|
void |
Layer.forward(MatVector inputs,
MatVector outputs,
MatVector internals)
\brief Given the \p input blobs, computes the output \p blobs.
|
void |
Net.forward(MatVector outputBlobs,
String outputName) |
void |
Net.forward(MatVector outputBlobs,
StringVector outBlobNames)
\brief Runs forward pass to compute outputs of layers listed in \p outBlobNames.
|
void |
Model.predict(GpuMat frame,
MatVector outs) |
void |
Model.predict(Mat frame,
MatVector outs)
\brief Given the \p input frame, create input blob, run net and return the output \p blobs.
|
void |
Model.predict(UMat frame,
MatVector outs) |
void |
Layer.run(MatVector inputs,
MatVector outputs,
MatVector internals)
Deprecated.
This method will be removed in the future release.
|
Modifier and Type | Method and Description |
---|---|
void |
DnnSuperResImpl.upsampleMultioutput(GpuMat img,
MatVector imgs_new,
int[] scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(GpuMat img,
MatVector imgs_new,
IntBuffer scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(GpuMat img,
MatVector imgs_new,
IntPointer scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(Mat img,
MatVector imgs_new,
int[] scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(Mat img,
MatVector imgs_new,
IntBuffer scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(Mat img,
MatVector imgs_new,
IntPointer scale_factors,
StringVector node_names)
\brief Upsample via neural network of multiple outputs
|
void |
DnnSuperResImpl.upsampleMultioutput(UMat img,
MatVector imgs_new,
int[] scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(UMat img,
MatVector imgs_new,
IntBuffer scale_factors,
StringVector node_names) |
void |
DnnSuperResImpl.upsampleMultioutput(UMat img,
MatVector imgs_new,
IntPointer scale_factors,
StringVector node_names) |
Modifier and Type | Method and Description |
---|---|
MatVector |
LBPHFaceRecognizer.getHistograms() |
MatVector |
BasicFaceRecognizer.getProjections() |
Modifier and Type | Method and Description |
---|---|
void |
FaceRecognizer.train(MatVector src,
GpuMat labels) |
void |
FaceRecognizer.train(MatVector src,
Mat labels)
\brief Trains a FaceRecognizer with given data and associated labels.
|
void |
FaceRecognizer.train(MatVector src,
UMat labels) |
boolean |
FacemarkKazemi.training(MatVector images,
Point2fVectorVector landmarks,
BytePointer configfile,
Size scale) |
boolean |
FacemarkKazemi.training(MatVector images,
Point2fVectorVector landmarks,
BytePointer configfile,
Size scale,
BytePointer modelFilename)
\brief This function is used to train the model using gradient boosting to get a cascade of regressors
which can then be used to predict shape.
|
boolean |
FacemarkKazemi.training(MatVector images,
Point2fVectorVector landmarks,
String configfile,
Size scale) |
boolean |
FacemarkKazemi.training(MatVector images,
Point2fVectorVector landmarks,
String configfile,
Size scale,
String modelFilename) |
void |
FaceRecognizer.update(MatVector src,
GpuMat labels) |
void |
FaceRecognizer.update(MatVector src,
Mat labels)
\brief Updates a FaceRecognizer with given data and associated labels.
|
void |
FaceRecognizer.update(MatVector src,
UMat labels) |
Modifier and Type | Method and Description |
---|---|
MatVector |
BOWTrainer.getDescriptors()
\brief Returns a training set of descriptors.
|
MatVector |
DescriptorMatcher.getTrainDescriptors()
\brief Returns a constant link to the train descriptor collection trainDescCollection .
|
Modifier and Type | Method and Description |
---|---|
void |
FlannBasedMatcher.add(MatVector descriptors) |
void |
DescriptorMatcher.add(MatVector descriptors)
\brief Adds descriptors to train a CPU(trainDescCollectionis) or GPU(utrainDescCollectionis) descriptor
collection.
|
void |
Feature2D.compute(MatVector images,
KeyPointVectorVector keypoints,
MatVector descriptors)
\overload
|
void |
Feature2D.detect(MatVector images,
KeyPointVectorVector keypoints) |
void |
Feature2D.detect(MatVector images,
KeyPointVectorVector keypoints,
MatVector masks)
\overload
|
void |
DescriptorMatcher.knnMatch(GpuMat queryDescriptors,
DMatchVectorVector matches,
int k,
MatVector masks,
boolean compactResult) |
void |
DescriptorMatcher.knnMatch(Mat queryDescriptors,
DMatchVectorVector matches,
int k,
MatVector masks,
boolean compactResult)
\overload
|
void |
DescriptorMatcher.knnMatch(UMat queryDescriptors,
DMatchVectorVector matches,
int k,
MatVector masks,
boolean compactResult) |
void |
DescriptorMatcher.match(GpuMat queryDescriptors,
DMatchVector matches,
MatVector masks) |
void |
DescriptorMatcher.match(Mat queryDescriptors,
DMatchVector matches,
MatVector masks)
\overload
|
void |
DescriptorMatcher.match(UMat queryDescriptors,
DMatchVector matches,
MatVector masks) |
void |
DescriptorMatcher.radiusMatch(GpuMat queryDescriptors,
DMatchVectorVector matches,
float maxDistance,
MatVector masks,
boolean compactResult) |
void |
DescriptorMatcher.radiusMatch(Mat queryDescriptors,
DMatchVectorVector matches,
float maxDistance,
MatVector masks,
boolean compactResult)
\overload
|
void |
DescriptorMatcher.radiusMatch(UMat queryDescriptors,
DMatchVectorVector matches,
float maxDistance,
MatVector masks,
boolean compactResult) |
Modifier and Type | Method and Description |
---|---|
void |
EM.getCovs(MatVector covs)
\brief Returns covariation matrices
|
Modifier and Type | Method and Description |
---|---|
boolean |
QRCodeDetector.decodeMulti(GpuMat img,
GpuMat points,
StringVector decoded_info,
MatVector straight_qrcode) |
boolean |
QRCodeDetector.decodeMulti(Mat img,
Mat points,
StringVector decoded_info,
MatVector straight_qrcode)
\brief Decodes QR codes in image once it's found by the detect() method.
|
boolean |
QRCodeDetector.decodeMulti(UMat img,
UMat points,
StringVector decoded_info,
MatVector straight_qrcode) |
boolean |
QRCodeDetector.detectAndDecodeMulti(GpuMat img,
StringVector decoded_info,
GpuMat points,
MatVector straight_qrcode) |
boolean |
QRCodeDetector.detectAndDecodeMulti(Mat img,
StringVector decoded_info,
Mat points,
MatVector straight_qrcode)
\brief Both detects and decodes QR codes
|
boolean |
QRCodeDetector.detectAndDecodeMulti(UMat img,
StringVector decoded_info,
UMat points,
MatVector straight_qrcode) |
Modifier and Type | Method and Description |
---|---|
void |
AlignMTB.process(GpuMatVector src,
MatVector dst) |
void |
AlignMTB.process(GpuMatVector src,
MatVector dst,
GpuMat times,
GpuMat response) |
void |
AlignExposures.process(GpuMatVector src,
MatVector dst,
GpuMat times,
GpuMat response) |
void |
AlignMTB.process(GpuMatVector src,
MatVector dst,
Mat times,
Mat response) |
void |
AlignExposures.process(GpuMatVector src,
MatVector dst,
Mat times,
Mat response) |
void |
AlignMTB.process(GpuMatVector src,
MatVector dst,
UMat times,
UMat response) |
void |
AlignExposures.process(GpuMatVector src,
MatVector dst,
UMat times,
UMat response) |
void |
MergeMertens.process(MatVector src,
GpuMat dst) |
void |
CalibrateCRF.process(MatVector src,
GpuMat dst,
GpuMat times) |
void |
MergeDebevec.process(MatVector src,
GpuMat dst,
GpuMat times) |
void |
MergeRobertson.process(MatVector src,
GpuMat dst,
GpuMat times) |
void |
MergeDebevec.process(MatVector src,
GpuMat dst,
GpuMat times,
GpuMat response) |
void |
MergeRobertson.process(MatVector src,
GpuMat dst,
GpuMat times,
GpuMat response) |
void |
MergeExposures.process(MatVector src,
GpuMat dst,
GpuMat times,
GpuMat response) |
void |
MergeMertens.process(MatVector src,
GpuMat dst,
GpuMat times,
GpuMat response) |
void |
MergeMertens.process(MatVector src,
Mat dst)
\brief Short version of process, that doesn't take extra arguments.
|
void |
CalibrateCRF.process(MatVector src,
Mat dst,
Mat times)
\brief Recovers inverse camera response.
|
void |
MergeDebevec.process(MatVector src,
Mat dst,
Mat times) |
void |
MergeRobertson.process(MatVector src,
Mat dst,
Mat times) |
void |
MergeDebevec.process(MatVector src,
Mat dst,
Mat times,
Mat response) |
void |
MergeRobertson.process(MatVector src,
Mat dst,
Mat times,
Mat response) |
void |
MergeExposures.process(MatVector src,
Mat dst,
Mat times,
Mat response)
\brief Merges images.
|
void |
MergeMertens.process(MatVector src,
Mat dst,
Mat times,
Mat response) |
void |
AlignMTB.process(MatVector src,
MatVector dst)
\brief Short version of process, that doesn't take extra arguments.
|
void |
AlignMTB.process(MatVector src,
MatVector dst,
GpuMat times,
GpuMat response) |
void |
AlignExposures.process(MatVector src,
MatVector dst,
GpuMat times,
GpuMat response) |
void |
AlignMTB.process(MatVector src,
MatVector dst,
Mat times,
Mat response) |
void |
AlignExposures.process(MatVector src,
MatVector dst,
Mat times,
Mat response)
\brief Aligns images
|
void |
AlignMTB.process(MatVector src,
MatVector dst,
UMat times,
UMat response) |
void |
AlignExposures.process(MatVector src,
MatVector dst,
UMat times,
UMat response) |
void |
MergeMertens.process(MatVector src,
UMat dst) |
void |
CalibrateCRF.process(MatVector src,
UMat dst,
UMat times) |
void |
MergeDebevec.process(MatVector src,
UMat dst,
UMat times) |
void |
MergeRobertson.process(MatVector src,
UMat dst,
UMat times) |
void |
MergeDebevec.process(MatVector src,
UMat dst,
UMat times,
UMat response) |
void |
MergeRobertson.process(MatVector src,
UMat dst,
UMat times,
UMat response) |
void |
MergeExposures.process(MatVector src,
UMat dst,
UMat times,
UMat response) |
void |
MergeMertens.process(MatVector src,
UMat dst,
UMat times,
UMat response) |
void |
AlignMTB.process(UMatVector src,
MatVector dst) |
void |
AlignMTB.process(UMatVector src,
MatVector dst,
GpuMat times,
GpuMat response) |
void |
AlignExposures.process(UMatVector src,
MatVector dst,
GpuMat times,
GpuMat response) |
void |
AlignMTB.process(UMatVector src,
MatVector dst,
Mat times,
Mat response) |
void |
AlignExposures.process(UMatVector src,
MatVector dst,
Mat times,
Mat response) |
void |
AlignMTB.process(UMatVector src,
MatVector dst,
UMat times,
UMat response) |
void |
AlignExposures.process(UMatVector src,
MatVector dst,
UMat times,
UMat response) |
Modifier and Type | Method and Description |
---|---|
int |
Stitcher.composePanorama(MatVector images,
GpuMat pano) |
int |
Stitcher.composePanorama(MatVector images,
Mat pano)
\brief These functions try to compose the given images (or images stored internally from the other function
calls) into the final pano under the assumption that the image transformations were estimated
before.
|
int |
Stitcher.composePanorama(MatVector images,
UMat pano) |
int |
Stitcher.estimateTransform(MatVector images) |
int |
Stitcher.estimateTransform(MatVector images,
MatVector masks)
\brief These functions try to match the given images and to estimate rotations of each camera.
|
void |
NoExposureCompensator.getMatGains(MatVector umv) |
void |
GainCompensator.getMatGains(MatVector umv) |
void |
ExposureCompensator.getMatGains(MatVector arg0) |
void |
BlocksCompensator.getMatGains(MatVector umv) |
void |
BlocksGainCompensator.getMatGains(MatVector umv) |
void |
ChannelsCompensator.getMatGains(MatVector umv) |
void |
NoExposureCompensator.setMatGains(MatVector umv) |
void |
GainCompensator.setMatGains(MatVector umv) |
void |
ExposureCompensator.setMatGains(MatVector arg0) |
void |
BlocksCompensator.setMatGains(MatVector umv) |
void |
BlocksGainCompensator.setMatGains(MatVector umv) |
void |
ChannelsCompensator.setMatGains(MatVector umv) |
int |
Stitcher.stitch(MatVector images,
GpuMat pano) |
int |
Stitcher.stitch(MatVector images,
Mat pano)
\overload
|
int |
Stitcher.stitch(MatVector images,
MatVector masks,
GpuMat pano) |
int |
Stitcher.stitch(MatVector images,
MatVector masks,
Mat pano)
\brief These functions try to stitch the given images.
|
int |
Stitcher.stitch(MatVector images,
MatVector masks,
UMat pano) |
int |
Stitcher.stitch(MatVector images,
UMat pano) |
Modifier and Type | Method and Description |
---|---|
void |
SinusoidalPattern.computeDataModulationTerm(MatVector patternImages,
GpuMat dataModulationTerm,
GpuMat shadowMask) |
void |
SinusoidalPattern.computeDataModulationTerm(MatVector patternImages,
Mat dataModulationTerm,
Mat shadowMask)
\brief compute the data modulation term.
|
void |
SinusoidalPattern.computeDataModulationTerm(MatVector patternImages,
UMat dataModulationTerm,
UMat shadowMask) |
void |
SinusoidalPattern.computePhaseMap(MatVector patternImages,
GpuMat wrappedPhaseMap) |
void |
SinusoidalPattern.computePhaseMap(MatVector patternImages,
GpuMat wrappedPhaseMap,
GpuMat shadowMask,
GpuMat fundamental) |
void |
SinusoidalPattern.computePhaseMap(MatVector patternImages,
Mat wrappedPhaseMap) |
void |
SinusoidalPattern.computePhaseMap(MatVector patternImages,
Mat wrappedPhaseMap,
Mat shadowMask,
Mat fundamental)
\brief Compute a wrapped phase map from sinusoidal patterns.
|
void |
SinusoidalPattern.computePhaseMap(MatVector patternImages,
UMat wrappedPhaseMap) |
void |
SinusoidalPattern.computePhaseMap(MatVector patternImages,
UMat wrappedPhaseMap,
UMat shadowMask,
UMat fundamental) |
boolean |
StructuredLightPattern.decode(MatVectorVector patternImages,
GpuMat disparityMap,
MatVector blackImages,
MatVector whiteImages,
int flags) |
boolean |
StructuredLightPattern.decode(MatVectorVector patternImages,
Mat disparityMap,
MatVector blackImages,
MatVector whiteImages,
int flags)
\brief Decodes the structured light pattern, generating a disparity map
|
boolean |
StructuredLightPattern.decode(MatVectorVector patternImages,
UMat disparityMap,
MatVector blackImages,
MatVector whiteImages,
int flags) |
void |
SinusoidalPattern.findProCamMatches(GpuMat projUnwrappedPhaseMap,
GpuMat camUnwrappedPhaseMap,
MatVector matches) |
void |
SinusoidalPattern.findProCamMatches(Mat projUnwrappedPhaseMap,
Mat camUnwrappedPhaseMap,
MatVector matches)
\brief Find correspondences between the two devices thanks to unwrapped phase maps.
|
void |
SinusoidalPattern.findProCamMatches(UMat projUnwrappedPhaseMap,
UMat camUnwrappedPhaseMap,
MatVector matches) |
boolean |
StructuredLightPattern.generate(MatVector patternImages)
\brief Generates the structured light pattern to project.
|
boolean |
GrayCodePattern.getProjPixel(MatVector patternImages,
int x,
int y,
Point projPix)
\brief For a (x,y) pixel of a camera returns the corresponding projector pixel.
|
Modifier and Type | Method and Description |
---|---|
MatVector |
TrackerFeatureSet.getResponses()
\brief Get the responses
|
MatVector |
TrackerSampler.getSamples()
\brief Return the samples from all TrackerSamplerAlgorithm, \cite AAM Fig.
|
Modifier and Type | Method and Description |
---|---|
void |
Detector.classifySmooth(MatVector image) |
void |
Detector.classifySmooth(MatVector image,
float minMargin) |
float |
StrongClassifierDirectSelection.classifySmooth(MatVector images,
Rect sampleROI,
int[] idx) |
float |
StrongClassifierDirectSelection.classifySmooth(MatVector images,
Rect sampleROI,
IntBuffer idx) |
float |
StrongClassifierDirectSelection.classifySmooth(MatVector images,
Rect sampleROI,
IntPointer idx) |
void |
TrackerFeature.compute(MatVector images,
Mat response)
\brief Compute the features in the images collection
|
void |
TrackerFeatureSet.extraction(MatVector images)
\brief Extract features from the images collection
|
boolean |
TrackerFeatureHAAR.extractSelected(int[] selFeatures,
MatVector images,
Mat response) |
boolean |
TrackerFeatureHAAR.extractSelected(IntBuffer selFeatures,
MatVector images,
Mat response) |
boolean |
TrackerFeatureHAAR.extractSelected(IntPointer selFeatures,
MatVector images,
Mat response)
\brief Compute the features only for the selected indices in the images collection
|
void |
TrackerModel.modelEstimation(MatVector responses)
\brief Estimate the most likely target location
|
boolean |
TrackerSamplerAlgorithm.sampling(Mat image,
Rect boundingBox,
MatVector sample)
\brief Computes the regions starting from a position in an image.
|
boolean |
TrackerSamplerCS.samplingImpl(Mat image,
Rect boundingBox,
MatVector sample) |
Modifier and Type | Method and Description |
---|---|
MatVector |
DeblurerBase.frames() |
MatVector |
InpainterBase.frames() |
MatVector |
DeblurerBase.motions() |
MatVector |
WobbleSuppressorBase.motions() |
MatVector |
InpainterBase.motions() |
MatVector |
WobbleSuppressorBase.motions2() |
MatVector |
WobbleSuppressorBase.stabilizationMotions() |
MatVector |
InpainterBase.stabilizationMotions() |
MatVector |
InpainterBase.stabilizedFrames() |
Modifier and Type | Method and Description |
---|---|
void |
DeblurerBase.setFrames(MatVector val) |
void |
InpaintingPipeline.setFrames(MatVector val) |
void |
InpainterBase.setFrames(MatVector val) |
void |
DeblurerBase.setMotions(MatVector val) |
void |
WobbleSuppressorBase.setMotions(MatVector val) |
void |
InpaintingPipeline.setMotions(MatVector val) |
void |
InpainterBase.setMotions(MatVector val) |
void |
WobbleSuppressorBase.setMotions2(MatVector val) |
void |
WobbleSuppressorBase.setStabilizationMotions(MatVector val) |
void |
InpaintingPipeline.setStabilizationMotions(MatVector val) |
void |
InpainterBase.setStabilizationMotions(MatVector val) |
void |
InpaintingPipeline.setStabilizedFrames(MatVector val) |
void |
InpainterBase.setStabilizedFrames(MatVector val) |
Mat |
GaussianMotionFilter.stabilize(int idx,
MatVector motions,
Range range) |
Mat |
MotionFilterBase.stabilize(int idx,
MatVector motions,
Range range) |
void |
IMotionStabilizer.stabilize(int size,
MatVector motions,
Range range,
Mat stabilizationMotions)
assumes that [0, size-1) is contained in or equal to [range.first, range.second)
|
void |
MotionStabilizationPipeline.stabilize(int size,
MatVector motions,
Range range,
Mat stabilizationMotions) |
void |
MotionFilterBase.stabilize(int size,
MatVector motions,
Range range,
Mat stabilizationMotions) |
void |
LpMotionStabilizer.stabilize(int size,
MatVector motions,
Range range,
Mat stabilizationMotions) |
Modifier and Type | Method and Description |
---|---|
void |
DAISY.compute(MatVector images,
KeyPointVectorVector keypoints,
MatVector descriptors) |
void |
PCTSignaturesSQFD.computeQuadraticFormDistances(Mat sourceSignature,
MatVector imageSignatures,
float[] distances) |
void |
PCTSignaturesSQFD.computeQuadraticFormDistances(Mat sourceSignature,
MatVector imageSignatures,
FloatBuffer distances) |
void |
PCTSignaturesSQFD.computeQuadraticFormDistances(Mat sourceSignature,
MatVector imageSignatures,
FloatPointer distances)
\brief Computes Signature Quadratic Form Distance between the reference signature
and each of the other image signatures.
|
void |
PCTSignatures.computeSignatures(MatVector images,
MatVector signatures)
\brief Computes signatures for multiple images in parallel.
|
Copyright © 2020. All rights reserved.