OpenCV 4.5.1
Open Source Computer Vision
Decode Gray code pattern tutorial

Goal

In this tutorial you will learn how to use the GrayCodePattern class to:

  • Decode a previously acquired Gray code pattern.
  • Generate a disparity map.
  • Generate a pointcloud.

Code

/*M///////////////////////////////////////////////////////////////////////////////////////
//
// IMPORTANT: READ BEFORE DOWNLOADING, COPYING, INSTALLING OR USING.
//
// By downloading, copying, installing or using the software you agree to this license.
// If you do not agree to this license, do not download, install,
// copy or use the software.
//
//
// License Agreement
// For Open Source Computer Vision Library
//
// Copyright (C) 2015, OpenCV Foundation, all rights reserved.
// Third party copyrights are property of their respective owners.
//
// Redistribution and use in source and binary forms, with or without modification,
// are permitted provided that the following conditions are met:
//
// * Redistribution's of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// * Redistribution's in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
//
// * The name of the copyright holders may not be used to endorse or promote products
// derived from this software without specific prior written permission.
//
// This software is provided by the copyright holders and contributors "as is" and
// any express or implied warranties, including, but not limited to, the implied
// warranties of merchantability and fitness for a particular purpose are disclaimed.
// In no event shall the Intel Corporation or contributors be liable for any direct,
// indirect, incidental, special, exemplary, or consequential damages
// (including, but not limited to, procurement of substitute goods or services;
// loss of use, data, or profits; or business interruption) however caused
// and on any theory of liability, whether in contract, strict liability,
// or tort (including negligence or otherwise) arising in any way out of
// the use of this software, even if advised of the possibility of such damage.
//
//M*/
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/calib3d.hpp>
#include <opencv2/structured_light.hpp>
#include <opencv2/opencv_modules.hpp>
// (if you did not build the opencv_viz module, you will only see the disparity images)
#ifdef HAVE_OPENCV_VIZ
#include <opencv2/viz.hpp>
#endif
using namespace std;
using namespace cv;
static const char* keys =
{ "{@images_list | | Image list where the captured pattern images are saved}"
"{@calib_param_path | | Calibration_parameters }"
"{@proj_width | | The projector width used to acquire the pattern }"
"{@proj_height | | The projector height used to acquire the pattern}"
"{@white_thresh | | The white threshold height (optional)}"
"{@black_thresh | | The black threshold (optional)}" };
static void help()
{
cout << "\nThis example shows how to use the \"Structured Light module\" to decode a previously acquired gray code pattern, generating a pointcloud"
"\nCall:\n"
"./example_structured_light_pointcloud <images_list> <calib_param_path> <proj_width> <proj_height> <white_thresh> <black_thresh>\n"
<< endl;
}
static bool readStringList( const string& filename, vector<string>& l )
{
l.resize( 0 );
FileStorage fs( filename, FileStorage::READ );
if( !fs.isOpened() )
{
cerr << "failed to open " << filename << endl;
return false;
}
FileNode n = fs.getFirstTopLevelNode();
if( n.type() != FileNode::SEQ )
{
cerr << "cam 1 images are not a sequence! FAIL" << endl;
return false;
}
FileNodeIterator it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
{
l.push_back( ( string ) *it );
}
n = fs["cam2"];
if( n.type() != FileNode::SEQ )
{
cerr << "cam 2 images are not a sequence! FAIL" << endl;
return false;
}
it = n.begin(), it_end = n.end();
for( ; it != it_end; ++it )
{
l.push_back( ( string ) *it );
}
if( l.size() % 2 != 0 )
{
cout << "Error: the image list contains odd (non-even) number of elements\n";
return false;
}
return true;
}
int main( int argc, char** argv )
{
CommandLineParser parser( argc, argv, keys );
String images_file = parser.get<String>( 0 );
String calib_file = parser.get<String>( 1 );
structured_light::GrayCodePattern::Params params;
params.width = parser.get<int>( 2 );
params.height = parser.get<int>( 3 );
if( images_file.empty() || calib_file.empty() || params.width < 1 || params.height < 1 || argc < 5 || argc > 7 )
{
help();
return -1;
}
// Set up GrayCodePattern with params
Ptr<structured_light::GrayCodePattern> graycode = structured_light::GrayCodePattern::create( params );
size_t white_thresh = 0;
size_t black_thresh = 0;
if( argc == 7 )
{
// If passed, setting the white and black threshold, otherwise using default values
white_thresh = parser.get<unsigned>( 4 );
black_thresh = parser.get<unsigned>( 5 );
graycode->setWhiteThreshold( white_thresh );
graycode->setBlackThreshold( black_thresh );
}
vector<string> imagelist;
bool ok = readStringList( images_file, imagelist );
if( !ok || imagelist.empty() )
{
cout << "can not open " << images_file << " or the string list is empty" << endl;
help();
return -1;
}
FileStorage fs( calib_file, FileStorage::READ );
if( !fs.isOpened() )
{
cout << "Failed to open Calibration Data File." << endl;
help();
return -1;
}
// Loading calibration parameters
Mat cam1intrinsics, cam1distCoeffs, cam2intrinsics, cam2distCoeffs, R, T;
fs["cam1_intrinsics"] >> cam1intrinsics;
fs["cam2_intrinsics"] >> cam2intrinsics;
fs["cam1_distorsion"] >> cam1distCoeffs;
fs["cam2_distorsion"] >> cam2distCoeffs;
fs["R"] >> R;
fs["T"] >> T;
cout << "cam1intrinsics" << endl << cam1intrinsics << endl;
cout << "cam1distCoeffs" << endl << cam1distCoeffs << endl;
cout << "cam2intrinsics" << endl << cam2intrinsics << endl;
cout << "cam2distCoeffs" << endl << cam2distCoeffs << endl;
cout << "T" << endl << T << endl << "R" << endl << R << endl;
if( (!R.data) || (!T.data) || (!cam1intrinsics.data) || (!cam2intrinsics.data) || (!cam1distCoeffs.data) || (!cam2distCoeffs.data) )
{
cout << "Failed to load cameras calibration parameters" << endl;
help();
return -1;
}
size_t numberOfPatternImages = graycode->getNumberOfPatternImages();
vector<vector<Mat> > captured_pattern;
captured_pattern.resize( 2 );
captured_pattern[0].resize( numberOfPatternImages );
captured_pattern[1].resize( numberOfPatternImages );
Mat color = imread( imagelist[numberOfPatternImages], IMREAD_COLOR );
Size imagesSize = color.size();
// Stereo rectify
cout << "Rectifying images..." << endl;
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify( cam1intrinsics, cam1distCoeffs, cam2intrinsics, cam2distCoeffs, imagesSize, R, T, R1, R2, P1, P2, Q, 0,
-1, imagesSize, &validRoi[0], &validRoi[1] );
Mat map1x, map1y, map2x, map2y;
initUndistortRectifyMap( cam1intrinsics, cam1distCoeffs, R1, P1, imagesSize, CV_32FC1, map1x, map1y );
initUndistortRectifyMap( cam2intrinsics, cam2distCoeffs, R2, P2, imagesSize, CV_32FC1, map2x, map2y );
// Loading pattern images
for( size_t i = 0; i < numberOfPatternImages; i++ )
{
captured_pattern[0][i] = imread( imagelist[i], IMREAD_GRAYSCALE );
captured_pattern[1][i] = imread( imagelist[i + numberOfPatternImages + 2], IMREAD_GRAYSCALE );
if( (!captured_pattern[0][i].data) || (!captured_pattern[1][i].data) )
{
cout << "Empty images" << endl;
help();
return -1;
}
remap( captured_pattern[1][i], captured_pattern[1][i], map1x, map1y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( captured_pattern[0][i], captured_pattern[0][i], map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
}
cout << "done" << endl;
vector<Mat> blackImages;
vector<Mat> whiteImages;
blackImages.resize( 2 );
whiteImages.resize( 2 );
// Loading images (all white + all black) needed for shadows computation
cvtColor( color, whiteImages[0], COLOR_RGB2GRAY );
whiteImages[1] = imread( imagelist[2 * numberOfPatternImages + 2], IMREAD_GRAYSCALE );
blackImages[0] = imread( imagelist[numberOfPatternImages + 1], IMREAD_GRAYSCALE );
blackImages[1] = imread( imagelist[2 * numberOfPatternImages + 2 + 1], IMREAD_GRAYSCALE );
remap( color, color, map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( whiteImages[0], whiteImages[0], map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( whiteImages[1], whiteImages[1], map1x, map1y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( blackImages[0], blackImages[0], map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( blackImages[1], blackImages[1], map1x, map1y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
cout << endl << "Decoding pattern ..." << endl;
Mat disparityMap;
bool decoded = graycode->decode( captured_pattern, disparityMap, blackImages, whiteImages,
structured_light::DECODE_3D_UNDERWORLD );
if( decoded )
{
cout << endl << "pattern decoded" << endl;
// To better visualize the result, apply a colormap to the computed disparity
double min;
double max;
minMaxIdx(disparityMap, &min, &max);
Mat cm_disp, scaledDisparityMap;
cout << "disp min " << min << endl << "disp max " << max << endl;
convertScaleAbs( disparityMap, scaledDisparityMap, 255 / ( max - min ) );
applyColorMap( scaledDisparityMap, cm_disp, COLORMAP_JET );
// Show the result
resize( cm_disp, cm_disp, Size( 640, 480 ), 0, 0, INTER_LINEAR_EXACT );
imshow( "cm disparity m", cm_disp );
// Compute the point cloud
Mat pointcloud;
disparityMap.convertTo( disparityMap, CV_32FC1 );
reprojectImageTo3D( disparityMap, pointcloud, Q, true, -1 );
// Compute a mask to remove background
Mat dst, thresholded_disp;
threshold( scaledDisparityMap, thresholded_disp, 0, 255, THRESH_OTSU + THRESH_BINARY );
resize( thresholded_disp, dst, Size( 640, 480 ), 0, 0, INTER_LINEAR_EXACT );
imshow( "threshold disp otsu", dst );
#ifdef HAVE_OPENCV_VIZ
// Apply the mask to the point cloud
Mat pointcloud_tresh, color_tresh;
pointcloud.copyTo( pointcloud_tresh, thresholded_disp );
color.copyTo( color_tresh, thresholded_disp );
// Show the point cloud on viz
viz::Viz3d myWindow( "Point cloud with color" );
myWindow.setBackgroundMeshLab();
myWindow.showWidget( "coosys", viz::WCoordinateSystem() );
myWindow.showWidget( "pointcloud", viz::WCloud( pointcloud_tresh, color_tresh ) );
myWindow.showWidget( "text2d", viz::WText( "Point cloud", Point(20, 20), 20, viz::Color::green() ) );
myWindow.spin();
#endif // HAVE_OPENCV_VIZ
}
// Wait for a key press so the disparity windows stay open
waitKey();
return 0;
}

Explanation

First of all, the needed parameters must be passed to the program. The first one is the list of the previously acquired pattern images, stored in a .yaml file organized as follows:

%YAML:1.0
cam1:
- "/data/pattern_cam1_im1.png"
- "/data/pattern_cam1_im2.png"
..............
- "/data/pattern_cam1_im42.png"
- "/data/pattern_cam1_im43.png"
- "/data/pattern_cam1_im44.png"
cam2:
- "/data/pattern_cam2_im1.png"
- "/data/pattern_cam2_im2.png"
..............
- "/data/pattern_cam2_im42.png"
- "/data/pattern_cam2_im43.png"
- "/data/pattern_cam2_im44.png"

For example, the dataset used for this tutorial was acquired with a projector having a resolution of 1280x800: 42 pattern images (numbers 1 to 42) plus one all-white image (number 43) and one all-black image (number 44) were therefore captured with both cameras.
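
The pattern count follows from the projector resolution: GrayCodePattern projects one Gray code image per bit of the column index and per bit of the row index, each together with its inverse, giving 2*ceil(log2(1280)) + 2*ceil(log2(800)) = 2*11 + 2*10 = 42. Here is a minimal sketch (not part of the tutorial code) that checks this count against getNumberOfPatternImages():

#include <cmath>
#include <iostream>
#include <opencv2/structured_light.hpp>
using namespace std;
using namespace cv;
int main()
{
    structured_light::GrayCodePattern::Params params;
    params.width = 1280;
    params.height = 800;
    Ptr<structured_light::GrayCodePattern> graycode = structured_light::GrayCodePattern::create( params );
    // One pattern plus its inverse per column bit and per row bit
    size_t expected = 2 * (size_t) ceil( log2( (double) params.width ) )
                    + 2 * (size_t) ceil( log2( (double) params.height ) );
    cout << graycode->getNumberOfPatternImages() << " == " << expected << endl; // prints "42 == 42"
    return 0;
}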

Then the camera calibration parameters, stored in another .yml file, must be passed to the program, together with the width and the height of the projector used to project the pattern and, optionally, the values of the white and black thresholds.
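
The program reads six entries from the calibration file: cam1_intrinsics, cam1_distorsion, cam2_intrinsics, cam2_distorsion, R and T, where R and T bring points from the first camera's coordinate system to the second's (the key names, including the "distorsion" spelling, must match exactly). The following is only a sketch of the expected layout, with placeholder symbols standing in for the numeric values of a real stereo calibration (a five-parameter distortion model is shown, but yours may differ):

%YAML:1.0
cam1_intrinsics: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ fx1, 0., cx1, 0., fy1, cy1, 0., 0., 1. ]
cam1_distorsion: !!opencv-matrix
   rows: 1
   cols: 5
   dt: d
   data: [ k1, k2, p1, p2, k3 ]
cam2_intrinsics:
..............
cam2_distorsion:
..............
R: !!opencv-matrix
   rows: 3
   cols: 3
   dt: d
   data: [ .............. ]
T: !!opencv-matrix
   rows: 3
   cols: 1
   dt: d
   data: [ .............. ]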

In this way, the GrayCodePattern parameters can be set with the width and the height of the projector used during the pattern acquisition, and a pointer to a GrayCodePattern object can be created:

....
structured_light::GrayCodePattern::Params params;
params.width = parser.get<int>( 2 );
params.height = parser.get<int>( 3 );
....
// Set up GrayCodePattern with params
Ptr<structured_light::GrayCodePattern> graycode = structured_light::GrayCodePattern::create( params );

If the white and black thresholds are passed as parameters (these thresholds influence the number of decoded pixels: the black threshold is used when computing the shadow masks, the white one during the actual pattern decoding), their values can be set; otherwise the algorithm uses its default values.

size_t white_thresh = 0;
size_t black_thresh = 0;
if( argc == 7 )
{
// If passed, setting the white and black threshold, otherwise using default values
white_thresh = parser.get<unsigned>( 4 );
black_thresh = parser.get<unsigned>( 5 );
graycode->setWhiteThreshold( white_thresh );
graycode->setBlackThreshold( black_thresh );
}
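
For instance, assuming the 1280x800 dataset described above, a full invocation with explicit thresholds might look like this (the file names and the threshold values 5 and 40 are illustrative placeholders, not values prescribed by the tutorial):

./example_structured_light_pointcloud images_list.yaml calib_params.yml 1280 800 5 40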

At this point, to use the decode method of the GrayCodePattern class, the acquired pattern images must be stored in a vector of vectors of Mat. The outer vector has a size of two because there are two cameras: the first inner vector stores the pattern images captured by the left camera, the second those acquired by the right one. The number of pattern images is obviously the same for both cameras and can be retrieved using the getNumberOfPatternImages() method (the index arithmetic is explained right after the snippet):

size_t numberOfPatternImages = graycode->getNumberOfPatternImages();
vector<vector<Mat> > captured_pattern;
captured_pattern.resize( 2 );
captured_pattern[0].resize( numberOfPatternImages );
captured_pattern[1].resize( numberOfPatternImages );
.....
for( size_t i = 0; i < numberOfPatternImages; i++ )
{
captured_pattern[0][i] = imread( imagelist[i], IMREAD_GRAYSCALE );
captured_pattern[1][i] = imread( imagelist[i + numberOfPatternImages + 2], IMREAD_GRAYSCALE );
......
}
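
The indices in the snippet above mirror the layout of the .yaml image list (writing N for numberOfPatternImages, 42 in the example dataset):

// imagelist layout (N = numberOfPatternImages):
// [0 .. N-1]     cam1 pattern images   -> imagelist[i]
// [N]            cam1 white image (also reloaded in color, see below)
// [N+1]          cam1 black image      -> imagelist[numberOfPatternImages + 1]
// [N+2 .. 2N+1]  cam2 pattern images   -> imagelist[i + numberOfPatternImages + 2]
// [2N+2]         cam2 white image      -> imagelist[2 * numberOfPatternImages + 2]
// [2N+3]         cam2 black image      -> imagelist[2 * numberOfPatternImages + 2 + 1]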

The black and the white images must be stored in two separate vectors of Mat:

vector<Mat> blackImages;
vector<Mat> whiteImages;
blackImages.resize( 2 );
whiteImages.resize( 2 );
// Loading images (all white + all black) needed for shadows computation
cvtColor( color, whiteImages[0], COLOR_RGB2GRAY );
whiteImages[1] = imread( imagelist[2 * numberOfPatternImages + 2], IMREAD_GRAYSCALE );
blackImages[0] = imread( imagelist[numberOfPatternImages + 1], IMREAD_GRAYSCALE );
blackImages[1] = imread( imagelist[2 * numberOfPatternImages + 2 + 1], IMREAD_GRAYSCALE );

It is important to underline that all the images (the pattern images as well as the black and white ones) must be loaded as grayscale images and rectified before being passed to the decode method:

// Stereo rectify
cout << "Rectifying images..." << endl;
Mat R1, R2, P1, P2, Q;
Rect validRoi[2];
stereoRectify( cam1intrinsics, cam1distCoeffs, cam2intrinsics, cam2distCoeffs, imagesSize, R, T, R1, R2, P1, P2, Q, 0,
-1, imagesSize, &validRoi[0], &validRoi[1] );
Mat map1x, map1y, map2x, map2y;
initUndistortRectifyMap( cam1intrinsics, cam1distCoeffs, R1, P1, imagesSize, CV_32FC1, map1x, map1y );
initUndistortRectifyMap( cam2intrinsics, cam2distCoeffs, R2, P2, imagesSize, CV_32FC1, map2x, map2y );
........
for( size_t i = 0; i < numberOfPatternImages; i++ )
{
........
remap( captured_pattern[1][i], captured_pattern[1][i], map1x, map1y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( captured_pattern[0][i], captured_pattern[0][i], map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
}
........
remap( color, color, map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( whiteImages[0], whiteImages[0], map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( whiteImages[1], whiteImages[1], map1x, map1y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( blackImages[0], blackImages[0], map2x, map2y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );
remap( blackImages[1], blackImages[1], map1x, map1y, INTER_NEAREST, BORDER_CONSTANT, Scalar() );

In this way the decode method can be called to decode the pattern and generate the corresponding disparity map, computed with respect to the first (left) camera:

Mat disparityMap;
bool decoded = graycode->decode( captured_pattern, disparityMap, blackImages, whiteImages,
structured_light::DECODE_3D_UNDERWORLD );

To better visualize the result, a colormap is applied to the computed disparity:

double min;
double max;
minMaxIdx(disparityMap, &min, &max);
Mat cm_disp, scaledDisparityMap;
cout << "disp min " << min << endl << "disp max " << max << endl;
convertScaleAbs( disparityMap, scaledDisparityMap, 255 / ( max - min ) );
applyColorMap( scaledDisparityMap, cm_disp, COLORMAP_JET );
// Show the result
resize( cm_disp, cm_disp, Size( 640, 480 ), 0, 0, INTER_LINEAR_EXACT );
imshow( "cm disparity m", cm_disp );

At this point the point cloud can be generated using the reprojectImageTo3D method, taking care to convert the computed disparity to a CV_32FC1 Mat (the decode method computes a CV_64FC1 disparity map):

Mat pointcloud;
disparityMap.convertTo( disparityMap, CV_32FC1 );
reprojectImageTo3D( disparityMap, pointcloud, Q, true, -1 );

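For reference, reprojectImageTo3D applies the 4x4 perspective transformation Q produced by stereoRectify to every pixel and its disparity. In homogeneous coordinates:

\[ \begin{bmatrix} X \\ Y \\ Z \\ W \end{bmatrix} = Q \begin{bmatrix} x \\ y \\ \text{disparity}(x,y) \\ 1 \end{bmatrix}, \qquad \text{pointcloud}(x,y) = \left( \frac{X}{W}, \frac{Y}{W}, \frac{Z}{W} \right) \]

Passing true as the handleMissingValues argument makes pixels whose disparity is the minimal (i.e. missing) value reproject to points with a very large Z coordinate, so they can easily be filtered out afterwards.
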
Then a mask to remove the unwanted background is computed:

Mat dst, thresholded_disp;
threshold( scaledDisparityMap, thresholded_disp, 0, 255, THRESH_OTSU + THRESH_BINARY );
resize( thresholded_disp, dst, Size( 640, 480 ), 0, 0, INTER_LINEAR_EXACT );
imshow( "threshold disp otsu", dst );

The white image of cam1 was previously also loaded as a color image, in order to map the color of the object onto its reconstructed point cloud:

Mat color = imread( imagelist[numberOfPatternImages], IMREAD_COLOR );

The background removal mask is then applied to the point cloud and to the color image:

Mat pointcloud_tresh, color_tresh;
pointcloud.copyTo(pointcloud_tresh, thresholded_disp);
color.copyTo(color_tresh, thresholded_disp);

Finally, the computed point cloud of the scanned object can be visualized with viz:

viz::Viz3d myWindow( "Point cloud with color");
myWindow.setBackgroundMeshLab();
myWindow.showWidget( "coosys", viz::WCoordinateSystem());
myWindow.showWidget( "pointcloud", viz::WCloud( pointcloud_tresh, color_tresh ) );
myWindow.showWidget( "text2d", viz::WText( "Point cloud", Point(20, 20), 20, viz::Color::green() ) );
myWindow.spin();
