Visual Servoing Platform — version 3.2.0
1 #include <visp3/core/vpConfig.h>
3 #ifdef VISP_HAVE_MODULE_SENSOR
4 #include <visp3/sensor/vpV4l2Grabber.h>
5 #include <visp3/sensor/vp1394CMUGrabber.h>
6 #include <visp3/sensor/vp1394TwoGrabber.h>
7 #include <visp3/sensor/vpFlyCaptureGrabber.h>
8 #include <visp3/sensor/vpRealSense2.h>
10 #include <visp3/core/vpIoTools.h>
11 #include <visp3/core/vpXmlParserCamera.h>
12 #include <visp3/gui/vpDisplayGDI.h>
13 #include <visp3/gui/vpDisplayOpenCV.h>
14 #include <visp3/gui/vpDisplayX.h>
15 #include <visp3/io/vpImageIo.h>
16 #include <visp3/vision/vpKeyPoint.h>
18 #include <visp3/mbt/vpMbGenericTracker.h>
30 int main(
int argc,
char **argv)
32 #if defined(VISP_HAVE_OPENCV) && \
33 (defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2) )
36 std::string opt_modelname =
"teabox";
39 double opt_proj_error_threshold = 20.;
40 bool opt_use_ogre =
false;
41 bool opt_use_scanline =
false;
42 bool opt_display_projection_error =
false;
43 bool opt_learn =
false;
44 bool opt_auto_init =
false;
45 std::string opt_learning_data =
"learning/data-learned.bin";
46 std::string opt_intrinsic_file =
"";
47 std::string opt_camera_name =
"";
49 for (
int i = 0; i < argc; i++) {
50 if (std::string(argv[i]) ==
"--model") {
51 opt_modelname = std::string(argv[i + 1]);
53 else if (std::string(argv[i]) ==
"--tracker") {
54 opt_tracker = atoi(argv[i + 1]);
56 else if (std::string(argv[i]) ==
"--camera_device" && i + 1 < argc) {
57 opt_device = atoi(argv[i + 1]);
59 else if (std::string(argv[i]) ==
"--max_proj_error") {
60 opt_proj_error_threshold = atof(argv[i + 1]);
61 }
else if (std::string(argv[i]) ==
"--use_ogre") {
63 }
else if (std::string(argv[i]) ==
"--use_scanline") {
64 opt_use_scanline =
true;
65 }
else if (std::string(argv[i]) ==
"--learn") {
67 }
else if (std::string(argv[i]) ==
"--learning_data" && i+1 < argc) {
68 opt_learning_data = argv[i+1];
69 }
else if (std::string(argv[i]) ==
"--auto_init") {
71 }
else if (std::string(argv[i]) ==
"--display_proj_error") {
72 opt_display_projection_error =
true;
73 }
else if (std::string(argv[i]) ==
"--intrinsic" && i + 1 < argc) {
74 opt_intrinsic_file = std::string(argv[i + 1]);
75 }
else if (std::string(argv[i]) ==
"--camera_name" && i + 1 < argc) {
76 opt_camera_name = std::string(argv[i + 1]);
78 else if (std::string(argv[i]) ==
"--help" || std::string(argv[i]) ==
"-h") {
79 std::cout <<
"\nUsage: " << argv[0]
80 <<
" [--camera_device <camera device> (default: 0)]"
81 <<
" [--intrinsic <intrinsic file> (default: empty)]"
82 <<
" [--camera_name <camera name>] (default: empty)"
83 <<
" [--model <model name> (default: teabox)]"
84 <<
" [--tracker <0=egde|1=keypoint|2=hybrid> (default: 2)]"
85 <<
" [--use_ogre] [--use_scanline]"
86 <<
" [--max_proj_error <allowed projection error> (default: 20)]"
87 <<
" [--learn] [--auto_init] [--learning_data <data-learned.bin> (default: learning/data-learned.bin)]"
88 <<
" [--display_proj_error]"
97 if (!parentname.empty())
98 objectname = parentname +
"/" + objectname;
100 std::cout <<
"Tracker requested config files: " << objectname <<
".[init, cao]" << std::endl;
101 std::cout <<
"Tracker optional config files: " << objectname <<
".[ppm]" << std::endl;
103 std::cout <<
"Tracked features: " << std::endl;
104 std::cout <<
" Use edges : " << (opt_tracker == 0 || opt_tracker == 2) << std::endl;
105 std::cout <<
" Use klt : " << (opt_tracker == 1 || opt_tracker == 2) << std::endl;
106 std::cout <<
"Tracker options: " << std::endl;
107 std::cout <<
" Use ogre : " << opt_use_ogre << std::endl;
108 std::cout <<
" Use scanline: " << opt_use_scanline << std::endl;
109 std::cout <<
" Proj. error : " << opt_proj_error_threshold << std::endl;
110 std::cout <<
" Display proj. error: " << opt_display_projection_error << std::endl;
111 std::cout <<
"Config files: " << std::endl;
112 std::cout <<
" Config file: " <<
"\"" << objectname +
".xml" <<
"\"" << std::endl;
113 std::cout <<
" Model file : " <<
"\"" << objectname +
".cao" <<
"\"" << std::endl;
114 std::cout <<
" Init file : " <<
"\"" << objectname +
".init" <<
"\"" << std::endl;
115 std::cout <<
"Learning options : " << std::endl;
116 std::cout <<
" Learn : " << opt_learn << std::endl;
117 std::cout <<
" Auto init : " << opt_auto_init << std::endl;
118 std::cout <<
" Learning data: " << opt_learning_data << std::endl;
128 #ifdef VISP_HAVE_XML2
130 if (!opt_intrinsic_file.empty() && !opt_camera_name.empty())
139 #if defined(VISP_HAVE_V4L2)
141 std::ostringstream device;
142 device <<
"/dev/video" << opt_device;
143 std::cout <<
"Use Video 4 Linux grabber on device " << device.str() << std::endl;
147 #elif defined(VISP_HAVE_DC1394)
149 std::cout <<
"Use DC1394 grabber" << std::endl;
152 #elif defined(VISP_HAVE_CMU1394)
154 std::cout <<
"Use CMU1394 grabber" << std::endl;
157 #elif defined(VISP_HAVE_FLYCAPTURE)
159 std::cout <<
"Use FlyCapture grabber" << std::endl;
162 #elif defined(VISP_HAVE_REALSENSE2)
164 std::cout <<
"Use Realsense 2 grabber" << std::endl;
167 config.disable_stream(RS2_STREAM_DEPTH);
168 config.disable_stream(RS2_STREAM_INFRARED);
169 config.enable_stream(RS2_STREAM_COLOR, 640, 480, RS2_FORMAT_RGBA8, 30);
173 std::cout <<
"Read camera parameters from Realsense device" << std::endl;
176 #elif defined(VISP_HAVE_OPENCV)
177 std::cout <<
"Use OpenCV grabber on device " << opt_device << std::endl;
178 cv::VideoCapture g(opt_device);
180 std::cout <<
"Failed to open the camera" << std::endl;
190 #if defined(VISP_HAVE_X11)
192 #elif defined(VISP_HAVE_GDI)
197 display->init(I, 100, 100,
"Model-based tracker");
200 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
202 #elif defined(VISP_HAVE_OPENCV)
219 if (opt_tracker == 0)
221 #if defined(VISP_HAVE_MODULE_KLT) && defined(VISP_HAVE_OPENCV)
222 else if (opt_tracker == 1)
228 # if !defined(VISP_HAVE_MODULE_KLT)
229 std::cout <<
"klt and hybrid model-based tracker are not available since visp_klt module is not available. "
230 "In CMakeGUI turn visp_klt module ON, configure and build ViSP again."
233 std::cout <<
"Hybrid tracking is impossible since OpenCV is not enabled. "
234 <<
"Install OpenCV, configure and build ViSP again to run this tutorial."
244 #ifdef VISP_HAVE_XML2
254 if (opt_tracker == 0 || opt_tracker == 2) {
268 #ifdef VISP_HAVE_MODULE_KLT
269 if (opt_tracker == 1 || opt_tracker == 2) {
304 #if (defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
305 std::string detectorName =
"SIFT";
306 std::string extractorName =
"SIFT";
307 std::string matcherName =
"BruteForce";
309 std::string detectorName =
"FAST";
310 std::string extractorName =
"ORB";
311 std::string matcherName =
"BruteForce-Hamming";
314 if (opt_learn || opt_auto_init) {
318 #if !(defined(VISP_HAVE_OPENCV_NONFREE) || defined(VISP_HAVE_OPENCV_XFEATURES2D))
319 # if (VISP_HAVE_OPENCV_VERSION < 0x030000)
320 keypoint.setDetectorParameter(
"ORB",
"nLevels", 1);
322 cv::Ptr<cv::ORB> orb_detector = keypoint.
getDetector(
"ORB").dynamicCast<cv::ORB>();
324 orb_detector->setNLevels(1);
332 std::cout <<
"Cannot enable auto detection. Learning file \"" << opt_learning_data <<
"\" doesn't exist" << std::endl;
338 tracker.
initClick(I, objectname +
".init",
true);
341 bool learn_position =
false;
342 bool run_auto_init =
false;
344 run_auto_init =
true;
353 bool tracking_failed =
false;
354 #if defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_CMU1394) || defined(VISP_HAVE_FLYCAPTURE) || defined(VISP_HAVE_REALSENSE2)
356 #elif defined(VISP_HAVE_OPENCV)
365 std::cout <<
"Auto init succeed" << std::endl;
379 run_auto_init =
false;
384 tracking_failed =
true;
386 std::cout <<
"Tracker needs to restart (tracking exception)" << std::endl;
387 run_auto_init =
true;
391 if (! tracking_failed) {
392 double proj_error = 0;
402 if (proj_error > opt_proj_error_threshold) {
403 std::cout <<
"Tracker needs to restart (projection error detected: " << proj_error <<
")" << std::endl;
405 run_auto_init =
true;
407 tracking_failed =
true;
411 if (! tracking_failed) {
424 std::stringstream ss;
425 ss <<
"Translation: " << std::setprecision(5) << pose[0] <<
" " << pose[1] <<
" " << pose[2] <<
" [m]";
433 if (learn_position) {
435 std::vector<cv::KeyPoint> trainKeyPoints;
436 keypoint.
detect(I, trainKeyPoints);
439 std::vector<vpPolygon> polygons;
440 std::vector<std::vector<vpPoint> > roisPt;
441 std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.
getPolygonFaces();
442 polygons = pair.first;
443 roisPt = pair.second;
446 std::vector<cv::Point3f> points3f;
450 keypoint.
buildReference(I, trainKeyPoints, points3f,
true, learn_id++);
453 for (std::vector<cv::KeyPoint>::const_iterator it = trainKeyPoints.begin(); it != trainKeyPoints.end(); ++it) {
456 learn_position =
false;
457 std::cout <<
"Data learned" << std::endl;
460 std::stringstream ss;
465 else if (opt_auto_init)
475 learn_position =
true;
477 run_auto_init =
true;
484 std::cout <<
"Save learning file: " << opt_learning_data << std::endl;
492 std::cout <<
"Catch a ViSP exception: " << e << std::endl;
494 #elif defined(VISP_HAVE_OPENCV)
497 std::cout <<
"Install a 3rd party dedicated to frame grabbing (dc1394, cmu1394, v4l2, OpenCV, FlyCapture, Realsense2), configure and build ViSP again to use this example" << std::endl;
501 std::cout <<
"Install OpenCV 3rd party, configure and build ViSP again to use this example" << std::endl;
virtual void setKltOpencv(const vpKltOpencv &t)
Use the X11 console to display images on unix-like OS. Thus to enable this class X11 should be instal...
void open(vpImage< unsigned char > &I)
Real-time 6D object pose tracking using its CAD model.
const std::string & getStringMessage(void) const
Return a constant reference to the error message (may be empty).
int parse(vpCameraParameters &cam, const std::string &filename, const std::string &camera_name, const vpCameraParameters::vpCameraParametersProjType &projModel, const unsigned int image_width=0, const unsigned int image_height=0)
void setMatcher(const std::string &matcherName)
void setMinDistance(double minDistance)
void setDevice(const std::string &devname)
void setMaxFeatures(const int maxCount)
static void convert(const vpImage< unsigned char > &src, vpImage< vpRGBa > &dest)
unsigned int buildReference(const vpImage< unsigned char > &I)
virtual void setProjectionErrorDisplay(const bool display)
Generic class defining intrinsic camera parameters.
void open(vpImage< unsigned char > &I)
void setMu2(const double &mu_2)
virtual void setCameraParameters(const vpCameraParameters &camera)
void loadLearningData(const std::string &filename, const bool binaryMode=false, const bool append=false)
XML parser to load and save intrinsic camera parameters.
Firewire cameras video capture based on CMU 1394 Digital Camera SDK.
static double deg(double rad)
void setQuality(double qualityLevel)
virtual void setMovingEdge(const vpMe &me)
virtual void loadModel(const std::string &modelFile, const bool verbose=false, const vpHomogeneousMatrix &T=vpHomogeneousMatrix())
virtual void getCameraParameters(vpCameraParameters &cam1, vpCameraParameters &cam2) const
void setHarrisFreeParameter(double harris_k)
vpCameraParameters getCameraParameters(const rs2_stream &stream, vpCameraParameters::vpCameraParametersProjType type=vpCameraParameters::perspectiveProjWithDistortion) const
void setThreshold(const double &t)
virtual int getTrackerType() const
virtual void setOgreVisibilityTest(const bool &v)
virtual double getProjectionError() const
void setExtractor(const vpFeatureDescriptorType &extractorType)
Display for Windows using GDI (available on any 32-bit Windows platform).
unsigned int matchPoint(const vpImage< unsigned char > &I)
static const vpColor yellow
void saveLearningData(const std::string &filename, const bool binaryMode=false, const bool saveTrainingImages=true)
void setRange(const unsigned int &r)
static void displayFrame(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, double size, const vpColor &color=vpColor::none, unsigned int thickness=1, const vpImagePoint &offset=vpImagePoint(0, 0))
void acquire(vpImage< unsigned char > &grey)
The vpDisplayOpenCV allows to display image using the OpenCV library. Thus to enable this class OpenC...
Wrapper for the KLT (Kanade-Lucas-Tomasi) feature tracker implemented in OpenCV. Thus to enable this ...
VISP_EXPORT double measureTimeMs()
virtual void loadConfigFile(const std::string &configFile)
static const vpColor green
static void display(const vpImage< unsigned char > &I)
static void displayText(const vpImage< unsigned char > &I, const vpImagePoint &ip, const std::string &s, const vpColor &color)
virtual std::pair< std::vector< vpPolygon >, std::vector< std::vector< vpPoint > > > getPolygonFaces(const bool orderPolygons=true, const bool useVisibility=true, const bool clipPolygon=false)
virtual void initClick(const vpImage< unsigned char > &I1, const vpImage< unsigned char > &I2, const std::string &initFile1, const std::string &initFile2, const bool displayHelp=false, const vpHomogeneousMatrix &T1=vpHomogeneousMatrix(), const vpHomogeneousMatrix &T2=vpHomogeneousMatrix())
virtual double computeCurrentProjectionError(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &_cMo, const vpCameraParameters &_cam)
virtual void setKltMaskBorder(const unsigned int &e)
virtual void initFromPose(const vpImage< unsigned char > &I1, const vpImage< unsigned char > &I2, const std::string &initFile1, const std::string &initFile2)
virtual void track(const vpImage< unsigned char > &I)
void setMaskNumber(const unsigned int &a)
Class that allows keypoints detection (and descriptors extraction) and matching thanks to OpenCV libr...
void detect(const vpImage< unsigned char > &I, std::vector< cv::KeyPoint > &keyPoints, const vpRect &rectangle=vpRect())
Implementation of a pose vector and operations on poses.
void setBlockSize(const int blockSize)
cv::Ptr< cv::FeatureDetector > getDetector(const vpFeatureDetectorType &type) const
void setWindowSize(const int winSize)
Class for firewire ieee1394 video devices using libdc1394-2.x api.
virtual void setTrackerType(const int type)
static const vpColor none
void setPyramidLevels(const int pyrMaxLevel)
Class that is a wrapper over the Video4Linux2 (V4L2) driver.
virtual void getPose(vpHomogeneousMatrix &c1Mo, vpHomogeneousMatrix &c2Mo) const
void setSampleStep(const double &s)
static void flush(const vpImage< unsigned char > &I)
virtual void setScanLineVisibilityTest(const bool &v)
static void displayCross(const vpImage< unsigned char > &I, const vpImagePoint &ip, unsigned int size, const vpColor &color, unsigned int thickness=1)
void open(vpImage< unsigned char > &I)
void open(const rs2::config &cfg=rs2::config())
static bool getClick(const vpImage< unsigned char > &I, bool blocking=true)
void setScale(unsigned scale=vpV4l2Grabber::DEFAULT_SCALE)
static void compute3DForPointsInPolygons(const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, std::vector< cv::KeyPoint > &candidates, const std::vector< vpPolygon > &polygons, const std::vector< std::vector< vpPoint > > &roisPt, std::vector< cv::Point3f > &points, cv::Mat *descriptors=NULL)
Implementation of an homogeneous matrix and operations on such kind of matrices.
void setMu1(const double &mu_1)
void open(vpImage< unsigned char > &I)
Class that defines generic functionalities for display.
virtual void setProjectionErrorComputation(const bool &flag)
void setDetector(const vpFeatureDetectorType &detectorType)
void setMaskSize(const unsigned int &a)
Error that can be emitted by ViSP classes.
virtual void setDisplayFeatures(const bool displayF)
void initPersProjWithoutDistortion(const double px, const double py, const double u0, const double v0)
virtual void display(const vpImage< unsigned char > &I, const vpHomogeneousMatrix &cMo, const vpCameraParameters &cam, const vpColor &col, const unsigned int thickness=1, const bool displayFullModel=false)