ViSP
servoAfma6Points2DCamVelocityEyeToHand.cpp
/****************************************************************************
 *
 * $Id: servoAfma6Points2DCamVelocityEyeToHand.cpp 5004 2014-11-24 08:24:18Z fspindle $
 *
 * This file is part of the ViSP software.
 * Copyright (C) 2005 - 2014 by INRIA. All rights reserved.
 *
 * This software is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * ("GPL") version 2 as published by the Free Software Foundation.
 * See the file LICENSE.txt at the root directory of this source
 * distribution for additional information about the GNU GPL.
 *
 * For using ViSP with software that can not be combined with the GNU
 * GPL, please contact INRIA about acquiring a ViSP Professional
 * Edition License.
 *
 * See http://www.irisa.fr/lagadic/visp/visp.html for more information.
 *
 * This software was developed at:
 * INRIA Rennes - Bretagne Atlantique
 * Campus Universitaire de Beaulieu
 * 35042 Rennes Cedex
 * France
 * http://www.irisa.fr/lagadic
 *
 * If you have questions regarding the use of this file, please contact
 * INRIA at visp@inria.fr
 *
 * This file is provided AS IS with NO WARRANTY OF ANY KIND, INCLUDING THE
 * WARRANTY OF DESIGN, MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.
 *
 *
 * Description:
 * Tests the control law
 * eye-to-hand control
 * velocity computed in the camera frame
 *
 * Authors:
 * Eric Marchand
 *
 *****************************************************************************/
#include <visp/vpConfig.h>
#include <visp/vpDebug.h> // Debug trace
#include <stdlib.h>
#include <stdio.h>  // sprintf(), FILENAME_MAX
#include <fstream>  // std::ifstream, std::ofstream
#include <cmath>    // std::fabs
#include <limits>   // numeric_limits
#include <list>
#if (defined (VISP_HAVE_AFMA6) && defined (VISP_HAVE_DC1394_2))

#define SAVE 0

#include <visp/vp1394TwoGrabber.h>
#include <visp/vpImage.h>
#include <visp/vpImagePoint.h>
#include <visp/vpMath.h>
#include <visp/vpHomogeneousMatrix.h>
#include <visp/vpFeaturePoint.h>
#include <visp/vpPoint.h>
#include <visp/vpServo.h>
#include <visp/vpFeatureBuilder.h>
#include <visp/vpRobotAfma6.h>
#include <visp/vpException.h>
#include <visp/vpMatrixException.h>
#include <visp/vpServoDisplay.h>
#include <visp/vpDot.h>
#include <visp/vpPose.h>
#include <visp/vpPixelMeterConversion.h> // vpPixelMeterConversion::convertPoint()
#include <visp/vpImageIo.h>
#include <visp/vpDisplay.h>
#include <visp/vpDisplayX.h>
#include <visp/vpDisplayOpenCV.h>
#include <visp/vpDisplayGTK.h>

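// Geometry of the observed object: L is a characteristic length in meters and
// D the constant y coordinate of the dot centers in the object frame.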
#define L 0.006
#define D 0

int main()
{
  try
  {
    vpServo task ;

    vpCameraParameters cam ;
    vpImage<unsigned char> I ;
    int i ;

    // Firewire framegrabber setup. The video mode and framerate below are a
    // typical configuration; adjust them to the connected camera.
    vp1394TwoGrabber g ;
    g.setVideoMode(vp1394TwoGrabber::vpVIDEO_MODE_640x480_MONO8) ;
    g.setFramerate(vp1394TwoGrabber::vpFRAMERATE_60) ;
    g.open(I) ;

    g.acquire(I) ;

#ifdef VISP_HAVE_X11
    vpDisplayX display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_OPENCV)
    vpDisplayOpenCV display(I,100,100,"Current image") ;
#elif defined(VISP_HAVE_GTK)
    vpDisplayGTK display(I,100,100,"Current image") ;
#endif

    vpDisplay::display(I) ;
    vpDisplay::flush(I) ;

    std::cout << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << " Test program for vpServo " << std::endl ;
    std::cout << " Eye-to-hand task control" << std::endl ;
    std::cout << " Velocity computed in the camera frame" << std::endl ;
    std::cout << " Task : servo a point " << std::endl ;
    std::cout << "-------------------------------------------------------" << std::endl ;
    std::cout << std::endl ;

    const int nbPoint = 7 ;

    vpDot dot[nbPoint] ;
    vpImagePoint cog ;

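    // Initialize the tracker of each dot: the user is expected to click on
    // each of the 7 dots in the image.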
    for (i=0 ; i < nbPoint ; i++)
    {
      dot[i].initTracking(I) ;
      dot[i].setGraphics(true) ;
      dot[i].track(I) ;
      vpDisplay::flush(I) ;
      dot[i].setGraphics(false) ;
    }

    // 3D model of the object: coordinates of the 7 points in the object
    // frame, used to compute the pose
    vpPoint point[nbPoint] ;
    point[0].setWorldCoordinates(-2*L, D, -3*L) ;
    point[1].setWorldCoordinates(   0, D, -3*L) ;
    point[2].setWorldCoordinates( 2*L, D, -3*L) ;

    point[3].setWorldCoordinates(-L, D, -L) ;
    point[4].setWorldCoordinates( L, D, -L) ;
    point[5].setWorldCoordinates( L, D,  L) ;
    point[6].setWorldCoordinates(-L, D,  L) ;

    vpRobotAfma6 robot ;
    // Update camera parameters
    robot.getCameraParameters(cam, I) ;

    vpHomogeneousMatrix cMo, cdMo ;
    vpPose pose ;
    pose.clearPoint() ;
    for (i=0 ; i < nbPoint ; i++)
    {
      cog = dot[i].getCog() ;
      double x=0, y=0 ;
      vpPixelMeterConversion::convertPoint(cam, cog, x, y) ;
      point[i].set_x(x) ;
      point[i].set_y(y) ;
      pose.addPoint(point[i]) ;
    }

    // Compute the initial pose using the Dementhon method followed by a
    // non-linear minimisation method (Lowe)
    pose.computePose(vpPose::DEMENTHON_LOWE, cMo) ;

    std::cout << cMo << std::endl ;
    cMo.print() ;

    /*------------------------------------------------------------------
      -- Learning the desired position
      -- or reading the desired position
      ------------------------------------------------------------------
     */
    std::cout << " Learning 0/1 " << std::endl ;
    char name[FILENAME_MAX] ;
    sprintf(name, "cdMo.dat") ;
    int learning ;
    std::cin >> learning ;
    if (learning == 1)
    {
      // Save the object position
      vpTRACE("Save the location of the object in a file cdMo.dat") ;
      std::ofstream f(name) ;
      cMo.save(f) ;
      f.close() ;
      exit(1) ;
    }

    {
      vpTRACE("Loading desired location from cdMo.dat") ;
      std::ifstream f("cdMo.dat") ;
      cdMo.load(f) ;
      f.close() ;
    }

    vpFeaturePoint p[nbPoint], pd[nbPoint] ;

    // Set the desired position of the points by forward projection using
    // the learned pose cdMo
    for (i=0 ; i < nbPoint ; i++)
    {
      vpColVector cP, proj ; // 3D point in the camera frame and its projection
      point[i].changeFrame(cdMo, cP) ;
      point[i].projection(cP, proj) ;

      pd[i].set_x(proj[0]) ;
      pd[i].set_y(proj[1]) ;
    }

    //------------------------------------------------------------------

    vpTRACE("Define the task") ;
    vpTRACE("\t we want an eye-to-hand control law") ;
    vpTRACE("\t robot is controlled in the camera frame") ;
    task.setServo(vpServo::EYETOHAND_L_cVe_eJe) ;
    task.setInteractionMatrixType(vpServo::CURRENT) ;

    for (i=0 ; i < nbPoint ; i++)
    {
      task.addFeature(p[i], pd[i]) ;
    }

    vpTRACE("Display task information") ;
    task.print() ;

    //------------------------------------------------------------------

    double convergence_threshold = 0.00 ; // e.g. 0.00025 to stop on convergence

    //-------------------------------------------------------------
    double error = 1 ;
    unsigned int iter = 0 ;
    vpTRACE("\t loop") ;
    robot.setRobotState(vpRobot::STATE_VELOCITY_CONTROL) ;
    vpColVector v ; // computed robot velocity

    // Position of the object in the effector frame
    vpHomogeneousMatrix oMcamrobot ;
    oMcamrobot[0][3] = -0.05 ;

    vpImage<vpRGBa> Ic ;
    int it = 0 ;

    // Adaptive gain parameters
    double lambda_av = 0.1 ;
    double alpha = 1 ;
    double beta = 3 ;

    std::cout << "alpha (suggested value: 0.7): " << std::endl ;
    std::cin >> alpha ;
    std::cout << "beta (suggested value: 5): " << std::endl ;
    std::cin >> beta ;
    std::list<vpImagePoint> Lcog ;
    vpImagePoint ip ;
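    // Servo loop: at each iteration the dots are tracked, the current visual
    // features, the twist transformation cVe and the robot Jacobian eJe are
    // updated, then the camera velocity is computed and sent to the robot.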
    while (error > convergence_threshold)
    {
      std::cout << "---------------------------------------------" << iter++ << std::endl ;

      g.acquire(I) ;
      vpDisplay::display(I) ;
      ip.set_i( 265 );
      ip.set_j( 150 );
      vpDisplay::displayText(I, ip, "Eye-To-Hand Visual Servoing", vpColor::green) ;
      ip.set_i( 280 );
      ip.set_j( 150 );
      vpDisplay::displayText(I, ip, "IRISA-INRIA Rennes, Lagadic project", vpColor::green) ;
      try
      {
        for (i=0 ; i < nbPoint ; i++)
        {
          dot[i].track(I) ;
          Lcog.push_back( dot[i].getCog() );
        }
      }
      catch(...)
      {
        vpTRACE("Error detected while tracking visual features") ;
        robot.stopMotion() ;
        exit(1) ;
      }

      // Compute the current pose of the object using a non-linear
      // minimisation method, starting from the previous estimate
      pose.clearPoint() ;

      for (i=0 ; i < nbPoint ; i++)
      {
        double x=0, y=0;
        cog = dot[i].getCog();
        vpPixelMeterConversion::convertPoint(cam, cog, x, y) ;
        point[i].set_x(x) ;
        point[i].set_y(y) ;

        // Depth of the feature is taken at the desired pose cdMo
        vpColVector cP ;
        point[i].changeFrame(cdMo, cP) ;

        p[i].set_x(x) ;
        p[i].set_y(y) ;
        p[i].set_Z(cP[2]) ;

        pose.addPoint(point[i]) ;

        point[i].display(I, cMo, cam, vpColor::green) ;
        point[i].display(I, cdMo, cam, vpColor::blue) ;
      }
      pose.computePose(vpPose::LOWE, cMo) ;
      vpDisplay::flush(I) ;

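      // Eye-to-hand configuration: the camera is fixed and observes the object
      // carried by the end-effector, so the effector location in the camera
      // frame is deduced from the estimated object pose:
      //   cMe = cMo * oMcamrobot * camrobotMe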
      vpHomogeneousMatrix cMe, camrobotMe ;
      robot.get_cMe(camrobotMe) ;
      cMe = cMo * oMcamrobot * camrobotMe ;

      task.set_cVe(cMe) ;

      vpMatrix eJe ;
      robot.get_eJe(eJe) ;
      task.set_eJe(eJe) ;

      // Compute the adaptive gain (speeds up the convergence)
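      //   gain = alpha * exp(-beta * ||s - s*||^2) + lambda_av
      // The gain is high when the error is small and tends to lambda_av when
      // the error is large.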
      double gain ;
      if (iter > 2)
      {
        if (std::fabs(alpha) <= std::numeric_limits<double>::epsilon())
          gain = lambda_av ;
        else
        {
          gain = alpha * exp (-beta * ( task.getError() ).sumSquare() ) + lambda_av;
        }
      }
      else gain = lambda_av ;
      if (SAVE == 1)
        gain = gain/5 ;

      vpTRACE("%f %f %f %f %f", alpha, beta, lambda_av, ( task.getError() ).sumSquare(), gain) ;
      task.setLambda(gain) ;

      v = task.computeControlLaw() ;

      // Display the trajectory of the points in the image
      for (std::list<vpImagePoint>::const_iterator it_cog = Lcog.begin(); it_cog != Lcog.end(); ++it_cog)
      {
        vpDisplay::displayPoint(I, *it_cog, vpColor::red) ;
      }
      vpServoDisplay::display(task, cam, I) ;
      // Send the computed velocity, expressed in the camera frame, to the robot
      robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;

      error = ( task.getError() ).sumSquare() ;
      std::cout << "|| s - s* || = " << error << std::endl ;

      if (error > 7)
      {
        vpTRACE("Error detected while tracking visual features") ;
        robot.stopMotion() ;
        exit(1) ;
      }

      // Display the current and desired poses
      // pose.display(I, cMo, cam, 0.04, vpColor::red) ;
      // pose.display(I, cdMo, cam, 0.04, vpColor::blue) ;
      if ((SAVE == 1) && (iter % 3 == 0))
      {
        vpDisplay::getImage(I, Ic) ;
        sprintf(name, "/tmp/marchand/image.%04d.ppm", it++) ;
        vpImageIo::write(Ic, name) ;
      }
    }
    // Stop the robot by sending a zero velocity
    v = 0 ;
    robot.setVelocity(vpRobot::CAMERA_FRAME, v) ;
    task.kill() ;
  }
  catch (...)
  {
    vpERROR_TRACE(" Test failed") ;
    return 0;
  }
}

#else
int
main()
{
  vpERROR_TRACE("You do not have an afma6 robot or a firewire framegrabber connected to your computer...");
}

#endif