ViSP  3.0.0
servoPioneerPoint2DDepth.cpp
/****************************************************************************
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2015 by Inria. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact Inria about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://visp.inria.fr for more information.
 *
 * This software was developed at:
 * Inria Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 *
 * If you have questions regarding the use of this file, please contact
 * Inria at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 * Description:
 * IBVS on Pioneer P3DX mobile platform
 *
 * Authors:
 * Fabien Spindler
 *
 *****************************************************************************/
#include <iostream>

#include <visp3/core/vpConfig.h>

#include <visp3/robot/vpRobotPioneer.h>
#include <visp3/core/vpCameraParameters.h>
#include <visp3/gui/vpDisplayGDI.h>
#include <visp3/gui/vpDisplayX.h>
#include <visp3/blob/vpDot2.h>
#include <visp3/visual_features/vpFeatureBuilder.h>
#include <visp3/visual_features/vpFeatureDepth.h>
#include <visp3/visual_features/vpFeaturePoint.h>
#include <visp3/core/vpHomogeneousMatrix.h>
#include <visp3/core/vpImage.h>
#include <visp3/core/vpImageConvert.h>
#include <visp3/sensor/vp1394TwoGrabber.h>
#include <visp3/sensor/vp1394CMUGrabber.h>
#include <visp3/sensor/vpV4l2Grabber.h>
#include <visp3/sensor/vpOpenCVGrabber.h>
#include <visp3/vs/vpServo.h>
#include <visp3/core/vpVelocityTwistMatrix.h>

#if defined(VISP_HAVE_DC1394) || defined(VISP_HAVE_V4L2) || defined(VISP_HAVE_CMU1394) || (VISP_HAVE_OPENCV_VERSION >= 0x020100)
#if defined(VISP_HAVE_X11) || defined(VISP_HAVE_GDI)
#if defined(VISP_HAVE_PIONEER)
# define TEST_COULD_BE_ACHIEVED
#endif
#endif
#endif

#undef VISP_HAVE_OPENCV // To use a firewire camera
#undef VISP_HAVE_V4L2   // To use a firewire camera
#ifdef TEST_COULD_BE_ACHIEVED
int main(int argc, char **argv)
{
  try {
    vpImage<unsigned char> I; // Create a gray level image container
    double depth = 1.;
    double lambda = 0.6;
    double coef = 1./6.77; // Scale parameter used to estimate the depth Z of the blob from its surface

    vpRobotPioneer robot;
    ArArgumentParser parser(&argc, argv);
    parser.loadDefaultArguments();

    // ArRobotConnector connects to the robot, gets some initial data from it such as type and name,
    // and then loads parameter files for this robot.
    ArRobotConnector robotConnector(&parser, &robot);
    if (!robotConnector.connectRobot())
    {
      ArLog::log(ArLog::Terse, "Could not connect to the robot.");
      if (parser.checkHelpAndWarnUnparsed())
      {
        Aria::logOptions();
        Aria::exit(1);
      }
    }
    if (!Aria::parseArgs())
    {
      Aria::logOptions();
      Aria::shutdown();
      return 1;
    }

    // Wait 3 sec to be sure that the low level Aria thread used to control
    // the robot is started. Without this delay we experienced a latency (around 2.2 sec)
    // between the velocity sent to the robot and the velocity that is really applied
    // to the wheels.
    vpTime::sleepMs(3000);

    std::cout << "Robot connected" << std::endl;

    // Camera parameters. In this experiment we don't need a precise calibration of the camera
    vpCameraParameters cam;

    // Create the camera framegrabber
#if defined(VISP_HAVE_OPENCV)
    int device = 1;
    std::cout << "Use device: " << device << std::endl;
    cv::VideoCapture g(device); // open the camera device
    g.set(CV_CAP_PROP_FRAME_WIDTH, 640);
    g.set(CV_CAP_PROP_FRAME_HEIGHT, 480);
    if (!g.isOpened()) // check if we succeeded
      return -1;
    cv::Mat frame;
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);

    // Logitech Sphere parameters
    cam.initPersProjWithoutDistortion(558, 555, 312, 210);
#elif defined(VISP_HAVE_V4L2)
    // Create a grabber based on v4l2 third party lib (for usb cameras under Linux)
    vpV4l2Grabber g;
    g.setScale(1);
    g.setInput(0);
    g.setDevice("/dev/video1");
    g.open(I);
    // Logitech Sphere parameters
    cam.initPersProjWithoutDistortion(558, 555, 312, 210);
#elif defined(VISP_HAVE_DC1394)
    // Create a grabber based on libdc1394-2.x third party lib (for firewire cameras under Linux)
    vp1394TwoGrabber g(false);
    g.open(I);
    // AVT Pike 032C parameters
    cam.initPersProjWithoutDistortion(800, 795, 320, 216);
#elif defined(VISP_HAVE_CMU1394)
    // Create a grabber based on CMU 1394 third party lib (for firewire cameras under windows)
    vp1394CMUGrabber g;
    g.setVideoMode(0, 5); // 640x480 MONO8
    g.setFramerate(4);    // 30 Hz
    g.open(I);
    // AVT Pike 032C parameters
    cam.initPersProjWithoutDistortion(800, 795, 320, 216);
#endif
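    // Note on the intrinsics above: initPersProjWithoutDistortion(px, py, u0, v0) sets a pinhole
    // model without distortion, where px and py are the ratios between the focal length and the
    // pixel size along u and v, and (u0, v0) is the principal point. A pixel (u, v) then maps to
    // normalized coordinates x = (u - u0)/px and y = (v - v0)/py. The values hard-coded above are
    // rough calibrations of the cameras named in the comments; this experiment does not require
    // a precise calibration.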

    // Acquire an image from the grabber
#if defined(VISP_HAVE_OPENCV)
    g >> frame; // get a new frame from camera
    vpImageConvert::convert(frame, I);
#else
    g.acquire(I);
#endif

    // Create an image viewer
#if defined(VISP_HAVE_X11)
    vpDisplayX d(I, 10, 10, "Current frame");
#elif defined(VISP_HAVE_GDI)
    vpDisplayGDI d(I, 10, 10, "Current frame");
#endif
    vpDisplay::display(I);
    vpDisplay::flush(I);

    // Create a blob tracker
    vpDot2 dot;
    dot.setGraphics(true);
    dot.setComputeMoments(true);
    dot.setEllipsoidShapePrecision(0.);       // to track a blob without any constraint on its shape
    dot.setGrayLevelPrecision(0.9);           // to set the blob gray level bounds used for binarisation
    dot.setEllipsoidBadPointsPercentage(0.5); // to accept up to 50% of inner and outer points with a bad gray level
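    // The call to initTracking(I) below is made without an initial position, so the user is
    // expected to click on the blob in the viewer to initialize the tracker.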
    dot.initTracking(I);
    vpDisplay::flush(I);

    vpServo task;
    task.setServo(vpServo::EYEINHAND_L_cVe_eJe);
    task.setInteractionMatrixType(vpServo::DESIRED, vpServo::PSEUDO_INVERSE);
    task.setLambda(lambda);
    vpVelocityTwistMatrix cVe;
    cVe = robot.get_cVe();
    task.set_cVe(cVe);

    std::cout << "cVe: \n" << cVe << std::endl;

    vpMatrix eJe;
    robot.get_eJe(eJe);
    task.set_eJe(eJe);
    std::cout << "eJe: \n" << eJe << std::endl;

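    // With the EYEINHAND_L_cVe_eJe configuration the task computes a platform velocity of the
    // form v = -lambda * (L * cVe * eJe)^+ * (s - s*), where L is the interaction matrix of the
    // features, cVe the velocity twist transformation from the camera frame to the mobile
    // platform frame, and eJe the Jacobian of the unicycle. Only two degrees of freedom are
    // controlled: the translational velocity vx and the rotational velocity wz of the platform.
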
    // Current and desired visual features associated to the x coordinate of the point
    vpFeaturePoint s_x, s_xd;

    // Create the current x visual feature
    vpFeatureBuilder::create(s_x, cam, dot);

    // Create the desired x* visual feature
    s_xd.buildFrom(0, 0, depth);

    // Add the feature
    task.addFeature(s_x, s_xd);

    // Create the current and desired log(Z/Z*) visual features
    vpFeatureDepth s_Z, s_Zd;
    // Surface of the blob estimated from the image moment m00 and converted into meters
    double surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
    double Z, Zd;
    // Initial depth of the blob in front of the camera
    Z = coef * surface;
    // Desired depth Z* of the blob. This depth is learned and equal to the initial depth
    Zd = Z;

    std::cout << "Z " << Z << std::endl;
    s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, 0);   // log(Z/Z*) = 0 initially, that's why the last parameter is 0
    s_Zd.buildFrom(s_x.get_x(), s_x.get_y(), Zd, 0); // log(Z/Z*) = 0 initially, that's why the last parameter is 0

    // Add the feature
    task.addFeature(s_Z, s_Zd);

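    // About the depth estimation used above and in the loop: dot.m00 is the zero order moment of
    // the blob, i.e. its area in pixels. Dividing by px*py converts that area into normalized
    // image units, and for a roughly fronto-parallel target the apparent area decreases as 1/Z^2,
    // so 1/sqrt(m00/(px*py)) is proportional to the depth Z. The scale factor coef (1/6.77 here)
    // is presumably tuned for the particular target used in this experiment so that Z comes out
    // in meters.
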
    vpColVector v; // vx, wz

    while(1)
    {
      // Acquire a new image
#if defined(VISP_HAVE_OPENCV) && (VISP_HAVE_OPENCV_VERSION >= 0x020100)
      g >> frame; // get a new frame from camera
      vpImageConvert::convert(frame, I);
#else
      g.acquire(I);
#endif
      // Set the image as background of the viewer
      vpDisplay::display(I);

      // Track the blob
      dot.track(I);
      // Update the current x feature
      vpFeatureBuilder::create(s_x, cam, dot);

      // Update the log(Z/Z*) feature. Since the depth Z changes, we need to update the interaction matrix
      surface = 1./sqrt(dot.m00/(cam.get_px()*cam.get_py()));
      Z = coef * surface;
      s_Z.buildFrom(s_x.get_x(), s_x.get_y(), Z, log(Z/Zd));

      robot.get_cVe(cVe);
      task.set_cVe(cVe);

      robot.get_eJe(eJe);
      task.set_eJe(eJe);

      // Compute the control law. Velocities are computed in the mobile robot reference frame
      v = task.computeControlLaw();

      std::cout << "Send velocity to the Pioneer: " << v[0] << " m/s "
                << vpMath::deg(v[1]) << " deg/s" << std::endl;

      // Send the velocity to the robot
      robot.setVelocity(vpRobot::REFERENCE_FRAME, v);

      // Draw a vertical line which corresponds to the desired x coordinate of the dot cog
      vpDisplay::displayLine(I, 0, 320, 479, 320, vpColor::red);
      vpDisplay::flush(I);

      // A click in the viewer to exit
      if (vpDisplay::getClick(I, false))
        break;
    }

    std::cout << "Ending robot thread..." << std::endl;
    robot.stopRunning();

    // wait for the thread to stop
    robot.waitForRunExit();

    // Kill the servo task
    task.print();
    task.kill();
  }
  catch(const vpException &e) {
    std::cout << "Catch an exception: " << e << std::endl;
    return 1;
  }
}
#else
int main()
{
  std::cout << "You don't have the right 3rd party libraries to run this example..." << std::endl;
}
#endif