#include <visp3/core/vpConfig.h>

#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020301)

#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <visp3/core/vpImage.h>
#include <visp3/io/vpImageIo.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/gui/vpDisplayGTK.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayOpenCV.h>
#include <visp3/io/vpVideoReader.h>
#include <visp3/core/vpIoTools.h>
#include <visp3/io/vpParseArgv.h>
#include <visp3/mbt/vpMbEdgeTracker.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/vision/vpKeyPoint.h>
#include <visp3/vision/vpPose.h>
#include <visp3/core/vpPixelMeterConversion.h>

// List of allowed command line options
#define GETOPTARGS "cdh"

void usage(const char *name, const char *badparam);
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display);
/*
  Print the program options.
*/
void usage(const char *name, const char *badparam)
{
  fprintf(stdout, "\n\
Test keypoints matching.\n\
\n\
SYNOPSIS\n\
  %s [-c] [-d] [-h]\n", name);

  fprintf(stdout, "\n\
OPTIONS:\n\
\n\
  -c\n\
     Disable the mouse click. Useful to automate the \n\
     execution of this program without human intervention.\n\
\n\
  -d\n\
     Turn off the display.\n\
\n\
  -h\n\
     Print the help.\n");

  if (badparam)
    fprintf(stdout, "\nERROR: Bad parameter [%s]\n", badparam);
}
/*
  Set the program options from the command line.
*/
bool getOptions(int argc, const char **argv, bool &click_allowed, bool &display)
{
  const char *optarg_;
  int c;
  while ((c = vpParseArgv::parse(argc, argv, GETOPTARGS, &optarg_)) > 1) {

    switch (c) {
    case 'c': click_allowed = false; break;
    case 'd': display = false; break;
    case 'h': usage(argv[0], NULL); return false; break;

    default:
      usage(argv[0], optarg_);
      return false; break;
    }
  }

  if ((c == 1) || (c == -1)) {
    // Standalone argument or parsing error
    usage(argv[0], NULL);
    std::cerr << "ERROR: " << std::endl;
    std::cerr << "  Bad argument " << optarg_ << std::endl << std::endl;
    return false;
  }

  return true;
}
int main(int argc, const char **argv)
{
  try {
    std::string env_ipath;
    bool opt_click_allowed = true;
    bool opt_display = true;

    // Read the command line options
    if (getOptions(argc, argv, opt_click_allowed, opt_display) == false) {
      return -1;
    }

    // Get the test image directory from the VISP_INPUT_IMAGE_PATH environment variable
    env_ipath = vpIoTools::getViSPImagesDataPath();

    if (env_ipath.empty()) {
      std::cerr << "Please set the VISP_INPUT_IMAGE_PATH environment variable value." << std::endl;
      return -1;
    }
    // ... (the grayscale images and the vpVideoReader g are declared here; g is opened on the
    //      image sequence found in env_ipath and the first frame is read into I) ...

    // Pick a display class available on the current platform
#if defined VISP_HAVE_X11
    vpDisplayX display;
#elif defined VISP_HAVE_GTK
    vpDisplayGTK display;
#elif defined VISP_HAVE_GDI
    vpDisplayGDI display;
#elif defined VISP_HAVE_OPENCV
    vpDisplayOpenCV display;
#endif

    if (opt_display) {
      display.init(I, 0, 0, "ORB keypoints matching");
    }

    // Set up the model-based edge tracker used to get the pose on the reference image
    vpMbEdgeTracker tracker;
#ifdef VISP_HAVE_XML2
    // ... (tracker configuration read from an XML file with tracker.loadConfigFile()) ...
#else
    // ... (moving-edge, camera and clipping parameters set by hand) ...
#endif
    // ... (CAD model of the object loaded with tracker.loadModel()) ...

    if (opt_display && opt_click_allowed) {
      // Initialize the tracker interactively by clicking on the model points
      // ... (tracker.initClick()) ...
    }
    else {
      // Initialize the tracker from a hard-coded pose valid for the first image of the sequence
      vpHomogeneousMatrix cMoi(0.02044769891, 0.1101505452, 0.5078963719,
                               2.063603907, 1.110231561, -0.4392789872);
      // ... (tracker.initFromPose(I, cMoi)) ...
    }
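    // The model-based edge tracker is only needed for the training stage: it provides the pose
    // of the object in the reference image (either from the user's clicks or from the hard-coded
    // pose above). That pose is what allows the 2D keypoints detected on this image to be
    // associated with 3D points of the CAD model further below.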
    // ORB keypoints, ORB descriptors and brute-force Hamming matching
    cv::Ptr<cv::FeatureDetector> detector;
    cv::Ptr<cv::DescriptorExtractor> extractor;
    cv::Ptr<cv::DescriptorMatcher> matcher;

#if (VISP_HAVE_OPENCV_VERSION >= 0x030000)
    detector = cv::ORB::create(500, 1.2f, 1);
    extractor = cv::ORB::create(500, 1.2f, 1);
#elif (VISP_HAVE_OPENCV_VERSION >= 0x020301)
    detector = cv::FeatureDetector::create("ORB");
    extractor = cv::DescriptorExtractor::create("ORB");
#endif
    matcher = cv::DescriptorMatcher::create("BruteForce-Hamming");

#if (VISP_HAVE_OPENCV_VERSION >= 0x020400 && VISP_HAVE_OPENCV_VERSION < 0x030000)
    // With OpenCV 2.4.x, use a single pyramid level to mirror the OpenCV 3.x settings above
    detector->set("nLevels", 1);
#endif
    // Detect the ORB keypoints on the reference image
    std::vector<cv::KeyPoint> trainKeyPoints;
    cv::Mat matImg;
    // ... (the ViSP image I is converted into the OpenCV image matImg) ...
    detector->detect(matImg, trainKeyPoints);

    // Get the visible faces of the CAD model from the tracker
    std::vector<vpPolygon> polygons;
    std::vector<std::vector<vpPoint> > roisPt;
    std::pair<std::vector<vpPolygon>, std::vector<std::vector<vpPoint> > > pair = tracker.getPolygonFaces(false);
    polygons = pair.first;
    roisPt = pair.second;

    // Compute the 3D object coordinates of the keypoints lying on a visible face
    std::vector<cv::Point3f> points3f;
    vpHomogeneousMatrix cMo;
    tracker.getPose(cMo);
    vpCameraParameters cam;
    tracker.getCameraParameters(cam);
    vpKeyPoint::compute3DForPointsInPolygons(cMo, cam, trainKeyPoints, polygons, roisPt, points3f);

    // Compute the descriptors of the remaining reference keypoints
    cv::Mat trainDescriptors;
    extractor->compute(matImg, trainKeyPoints, trainDescriptors);
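    // Training data layout: each keypoint detected on the reference image is back-projected onto
    // the CAD model using the tracker pose cMo and the camera parameters; only the keypoints that
    // fall inside a visible face polygon are kept, and each of them receives the 3D coordinates of
    // the corresponding model point expressed in the object frame. After this step, element i of
    // trainKeyPoints, trainDescriptors and points3f all describe the same physical point, which is
    // what the size check below verifies.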
    if (trainKeyPoints.size() != (size_t)trainDescriptors.rows || trainKeyPoints.size() != points3f.size()) {
      std::cerr << "Problem with training data size !" << std::endl;
      return -1;
    }
    bool opt_click = false;

    // Process the image sequence: all frames when the display is on, the first 30 frames otherwise
    while ((opt_display && !g.end()) || (!opt_display && g.getFrameIndex() < 30)) {
      // ... (the next frame is acquired with g.acquire(), displayed, and converted into matImg) ...
      // Detect and describe the keypoints on the current frame
      std::vector<cv::KeyPoint> queryKeyPoints;
      detector->detect(matImg, queryKeyPoints);

      cv::Mat queryDescriptors;
      extractor->compute(matImg, queryKeyPoints, queryDescriptors);

      // Match the current descriptors against the reference ones and keep only the unambiguous matches
      std::vector<std::vector<cv::DMatch> > knn_matches;
      std::vector<cv::DMatch> matches;
      matcher->knnMatch(queryDescriptors, trainDescriptors, knn_matches, 2);
      for (std::vector<std::vector<cv::DMatch> >::const_iterator it = knn_matches.begin(); it != knn_matches.end(); ++it) {
        if (it->size() > 1) {
          double ratio = (*it)[0].distance / (*it)[1].distance;
          if (ratio < 0.85) { // threshold value assumed; the original value is elided in this listing
            matches.push_back((*it)[0]);
          }
        }
      }
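      // Lowe's ratio test, applied above: knnMatch(k = 2) returns the two closest reference
      // descriptors for every query descriptor. A match is kept only when the best distance is
      // clearly smaller than the second-best one (ratio below the threshold); otherwise the
      // match is considered ambiguous and discarded.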
      // Build the list of 3D/2D correspondences used for the pose estimation
      vpPose estimated_pose;
      for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
        // 3D coordinates of the matched reference keypoint, in the object frame
        vpPoint pt(points3f[(size_t)(it->trainIdx)].x,
                   points3f[(size_t)(it->trainIdx)].y,
                   points3f[(size_t)(it->trainIdx)].z);

        // Normalized coordinates (in meter) of the matched keypoint in the current frame
        double x = 0.0, y = 0.0;
        vpPixelMeterConversion::convertPoint(cam, queryKeyPoints[(size_t)(it->queryIdx)].pt.x,
                                             queryKeyPoints[(size_t)(it->queryIdx)].pt.y, x, y);
        pt.set_x(x);
        pt.set_y(y);

        estimated_pose.addPoint(pt);
      }
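      // vpPose works with normalized coordinates: vpPixelMeterConversion::convertPoint() maps the
      // pixel position (u, v) of the matched query keypoint to (x, y) = (X/Z, Y/Z) in meters using
      // the camera intrinsics, while the coordinates taken from points3f locate the same point in
      // the object frame. Each vpPoint added to estimated_pose therefore carries one 3D/2D
      // correspondence.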
      // Estimate the pose from the correspondences, using RANSAC to reject wrong matches
      bool is_pose_estimated = false;
      vpHomogeneousMatrix cMo_est; // name assumed for the estimated pose in this excerpt
      if (estimated_pose.npt >= 4) {
        // At least 60% of the points must agree with the estimated pose
        unsigned int nb_inliers = (unsigned int)(0.6 * estimated_pose.npt);
        estimated_pose.setRansacNbInliersToReachConsensus(nb_inliers);
        // ... (RANSAC threshold and maximum number of trials set with setRansacThreshold()
        //      and setRansacMaxTrials()) ...
        if (estimated_pose.computePose(vpPose::RANSAC, cMo_est)) {
          is_pose_estimated = true;
        }
        else {
          is_pose_estimated = false;
        }
      }
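      // The ratio test does not remove every wrong match, so the pose is estimated with RANSAC:
      // pose hypotheses are computed from random minimal subsets of the correspondences, and a
      // hypothesis is accepted only if at least nb_inliers points (60% of the matches here) agree
      // with it. At least 4 correspondences are required, hence the npt >= 4 guard above.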
      if (opt_display) {
        // ... (the displayed image is refreshed: reference image on the left, current frame on
        //      the right) ...

        // Draw the retained matches
        for (std::vector<cv::DMatch>::const_iterator it = matches.begin(); it != matches.end(); ++it) {
          vpImagePoint leftPt(trainKeyPoints[(size_t)it->trainIdx].pt.y,
                              trainKeyPoints[(size_t)it->trainIdx].pt.x);
          vpImagePoint rightPt(queryKeyPoints[(size_t)it->queryIdx].pt.y,
                               queryKeyPoints[(size_t)it->queryIdx].pt.x
                               /* ... + the width of the reference image shown on the left;
                                      the exact term is elided in this listing ... */);
          // ... (a green line is drawn between leftPt and rightPt with vpDisplay::displayLine()) ...
        }

        if (is_pose_estimated) {
          // ... (the object frame is drawn with vpDisplay::displayFrame() from cMo_est and cam) ...
        }

        // ... (vpDisplay::flush()) ...
      }
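      // Display convention: vpImagePoint is built from (i, j) = (row, column), which is why the
      // OpenCV pt.y value is passed first. The query keypoint abscissa is additionally shifted by
      // the width of the reference image, so that the drawn line joins the reference (left) and
      // current (right) halves of the displayed image.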
      if (opt_click_allowed && opt_display) {
        // ... (a mouse click pauses the processing or quits, depending on opt_click) ...
      }
    } // end of the loop over the image sequence
  }
  catch (const vpException &e) {
    std::cerr << e.what() << std::endl;
    return -1;
  }

  std::cout << "testKeyPoint-4 is ok !" << std::endl;
  return 0;
}
#else
int main()
{
  std::cerr << "You need OpenCV library." << std::endl;
  return 0;
}
#endif