Point Cloud Library (PCL) 1.8.1
utils.hpp
/*
 * Software License Agreement (BSD License)
 *
 *  Point Cloud Library (PCL) - www.pointclouds.org
 *  Copyright (c) 2011, Willow Garage, Inc.
 *
 *  All rights reserved.
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above
 *     copyright notice, this list of conditions and the following
 *     disclaimer in the documentation and/or other materials provided
 *     with the distribution.
 *   * Neither the name of Willow Garage, Inc. nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 *  FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 *  COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 *  INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 *  BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 *  LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
 *  CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 *  LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
 *  ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 *  POSSIBILITY OF SUCH DAMAGE.
 *
 */

#ifndef PCL_GPU_KINFU_CUDA_UTILS_HPP_
#define PCL_GPU_KINFU_CUDA_UTILS_HPP_

#include <limits.h>  // for SHRT_MAX, used by numeric_limits<short>::max() below

namespace pcl
{
  namespace device
  {
    template <class T>
    __device__ __host__ __forceinline__ void swap ( T& a, T& b )
    {
      T c(a); a=b; b=c;
    }

    template<typename T> struct numeric_limits;

    template<> struct numeric_limits<float>
    {
      __device__ __forceinline__ static float
      quiet_NaN() { return __int_as_float(0x7fffffff); /*CUDART_NAN_F*/ };
      __device__ __forceinline__ static float
      epsilon() { return 1.192092896e-07f/*FLT_EPSILON*/; };

      __device__ __forceinline__ static float
      min() { return 1.175494351e-38f/*FLT_MIN*/; };
      __device__ __forceinline__ static float
      max() { return 3.402823466e+38f/*FLT_MAX*/; };
    };

    template<> struct numeric_limits<short>
    {
      __device__ __forceinline__ static short
      max() { return SHRT_MAX; };
    };
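
    // Usage sketch (illustrative, not part of the original header): KinFu-style
    // code flags a point as invalid by writing the device-side quiet NaN into
    // its coordinates; `invalidate` is a hypothetical helper name.
    __device__ __forceinline__ void
    invalidate (float3& p)
    {
      p.x = p.y = p.z = numeric_limits<float>::quiet_NaN ();
    }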

    __device__ __forceinline__ float
    dot(const float3& v1, const float3& v2)
    {
      return v1.x * v2.x + v1.y * v2.y + v1.z * v2.z;
    }

    __device__ __forceinline__ float3&
    operator+=(float3& vec, const float& v)
    {
      vec.x += v;  vec.y += v;  vec.z += v;  return vec;
    }

    __device__ __forceinline__ float3
    operator+(const float3& v1, const float3& v2)
    {
      return make_float3(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z);
    }

    __device__ __forceinline__ float3&
    operator*=(float3& vec, const float& v)
    {
      vec.x *= v;  vec.y *= v;  vec.z *= v;  return vec;
    }

    __device__ __forceinline__ float3
    operator-(const float3& v1, const float3& v2)
    {
      return make_float3(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z);
    }

    __device__ __forceinline__ float3
    operator*(const float3& v1, const float& v)
    {
      return make_float3(v1.x * v, v1.y * v, v1.z * v);
    }

    __device__ __forceinline__ float
    norm(const float3& v)
    {
      return sqrt(dot(v, v));
    }

    __device__ __forceinline__ float3
    normalized(const float3& v)
    {
      return v * rsqrt(dot(v, v));
    }

    __device__ __host__ __forceinline__ float3
    cross(const float3& v1, const float3& v2)
    {
      return make_float3(v1.y * v2.z - v1.z * v2.y, v1.z * v2.x - v1.x * v2.z, v1.x * v2.y - v1.y * v2.x);
    }
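
    // Usage sketch (illustrative, not part of the original header): a triangle
    // normal built from the helpers above; `triangleNormal` and its parameters
    // are hypothetical names.
    __device__ __forceinline__ float3
    triangleNormal (const float3& a, const float3& b, const float3& c)
    {
      return normalized (cross (b - a, c - a));
    }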

    // Fills roots.y and roots.z with the roots of x^2 - b*x + c = 0
    // (so roots.y + roots.z == b and roots.y * roots.z == c); roots.x is set
    // to zero for the degenerate-cubic caller below.
    __device__ __forceinline__ void computeRoots2(const float& b, const float& c, float3& roots)
    {
      roots.x = 0.f;
      float d = b * b - 4.f * c;
      if (d < 0.f) // no real roots!!!! THIS SHOULD NOT HAPPEN!
        d = 0.f;

      float sd = sqrtf(d);

      roots.z = 0.5f * (b + sd);
      roots.y = 0.5f * (b - sd);
    }

    // Computes the three real roots of x^3 - c2*x^2 + c1*x - c0 = 0 (the
    // characteristic polynomial of a symmetric 3x3 matrix) in increasing
    // order, using the trigonometric closed-form solution.
    __device__ __forceinline__ void
    computeRoots3(float c0, float c1, float c2, float3& roots)
    {
      if ( fabsf(c0) < numeric_limits<float>::epsilon()) // one root is 0 -> quadratic equation
      {
        computeRoots2 (c2, c1, roots);
      }
      else
      {
        const float s_inv3 = 1.f/3.f;
        const float s_sqrt3 = sqrtf(3.f);
        // Construct the parameters used in classifying the roots of the equation
        // and in solving the equation for the roots in closed form.
        float c2_over_3 = c2 * s_inv3;
        float a_over_3 = (c1 - c2*c2_over_3)*s_inv3;
        if (a_over_3 > 0.f)
          a_over_3 = 0.f;

        float half_b = 0.5f * (c0 + c2_over_3 * (2.f * c2_over_3 * c2_over_3 - c1));

        float q = half_b * half_b + a_over_3 * a_over_3 * a_over_3;
        if (q > 0.f)
          q = 0.f;

        // Compute the eigenvalues by solving for the roots of the polynomial.
        float rho = sqrtf(-a_over_3);
        float theta = atan2f (sqrtf (-q), half_b)*s_inv3;
        float cos_theta = __cosf (theta);
        float sin_theta = __sinf (theta);
        roots.x = c2_over_3 + 2.f * rho * cos_theta;
        roots.y = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta);
        roots.z = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta);

        // Sort in increasing order.
        if (roots.x >= roots.y)
          swap(roots.x, roots.y);

        if (roots.y >= roots.z)
        {
          swap(roots.y, roots.z);

          if (roots.x >= roots.y)
            swap (roots.x, roots.y);
        }
        if (roots.x <= 0) // eigenvalues of a symmetric positive semi-definite matrix cannot be negative! Set it to 0
          computeRoots2 (c2, c1, roots);
      }
    }
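
    // Illustrative check (not in the original header): for the matrix
    // diag(1, 2, 3), the coefficients are c2 = trace = 6, c1 = sum of the 2x2
    // principal minors = 11, and c0 = determinant = 6, so
    // computeRoots3 (6.f, 11.f, 6.f, r) yields r = (1, 2, 3).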

    struct Eigen33
    {
    public:
      template<int Rows>
      struct MiniMat
      {
        float3 data[Rows];
        __device__ __host__ __forceinline__ float3& operator[](int i) { return data[i]; }
        __device__ __host__ __forceinline__ const float3& operator[](int i) const { return data[i]; }
      };
      typedef MiniMat<3> Mat33;
      typedef MiniMat<4> Mat43;


      static __forceinline__ __device__ float3
      unitOrthogonal (const float3& src)
      {
        float3 perp;
        /* Let us compute the crossed product of *this with a vector
         * that is not too close to being colinear to *this.
         */

        /* unless the x and y coords are both close to zero, we can
         * simply take ( -y, x, 0 ) and normalize it.
         */
        if(!isMuchSmallerThan(src.x, src.z) || !isMuchSmallerThan(src.y, src.z))
        {
          float invnm = rsqrtf(src.x*src.x + src.y*src.y);
          perp.x = -src.y * invnm;
          perp.y =  src.x * invnm;
          perp.z = 0.0f;
        }
        /* if both x and y are close to zero, then the vector is close
         * to the z-axis, so it's far from colinear to the x-axis for instance.
         * So we take the crossed product with (1,0,0) and normalize it.
         */
        else
        {
          float invnm = rsqrtf(src.z * src.z + src.y * src.y);
          perp.x = 0.0f;
          perp.y = -src.z * invnm;
          perp.z =  src.y * invnm;
        }

        return perp;
      }

      __device__ __forceinline__
      Eigen33(volatile float* mat_pkg_arg) : mat_pkg(mat_pkg_arg) {}

      __device__ __forceinline__ void
      compute(Mat33& tmp, Mat33& vec_tmp, Mat33& evecs, float3& evals)
      {
        // Scale the matrix so its entries are in [-1,1]. The scaling is applied
        // only when at least one matrix entry has magnitude larger than 1.

        float max01 = fmaxf( fabsf(mat_pkg[0]), fabsf(mat_pkg[1]) );
        float max23 = fmaxf( fabsf(mat_pkg[2]), fabsf(mat_pkg[3]) );
        float max45 = fmaxf( fabsf(mat_pkg[4]), fabsf(mat_pkg[5]) );
        float m0123 = fmaxf( max01, max23 );
        float scale = fmaxf( max45, m0123 );

        if (scale <= numeric_limits<float>::min())
          scale = 1.f;

        mat_pkg[0] /= scale;
        mat_pkg[1] /= scale;
        mat_pkg[2] /= scale;
        mat_pkg[3] /= scale;
        mat_pkg[4] /= scale;
        mat_pkg[5] /= scale;

        // The characteristic equation is x^3 - c2*x^2 + c1*x - c0 = 0. The
        // eigenvalues are the roots to this equation, all guaranteed to be
        // real-valued, because the matrix is symmetric.
        float c0 = m00() * m11() * m22()
                 + 2.f * m01() * m02() * m12()
                 - m00() * m12() * m12()
                 - m11() * m02() * m02()
                 - m22() * m01() * m01();
        float c1 = m00() * m11() -
                   m01() * m01() +
                   m00() * m22() -
                   m02() * m02() +
                   m11() * m22() -
                   m12() * m12();
        float c2 = m00() + m11() + m22();

        computeRoots3(c0, c1, c2, evals);

        if (evals.z - evals.x <= numeric_limits<float>::epsilon())
        {
          // all three eigenvalues are (numerically) equal
          evecs[0] = make_float3(1.f, 0.f, 0.f);
          evecs[1] = make_float3(0.f, 1.f, 0.f);
          evecs[2] = make_float3(0.f, 0.f, 1.f);
        }
        else if (evals.y - evals.x <= numeric_limits<float>::epsilon())
        {
          // first and second eigenvalues are equal
          tmp[0] = row0();  tmp[1] = row1();  tmp[2] = row2();
          tmp[0].x -= evals.z;  tmp[1].y -= evals.z;  tmp[2].z -= evals.z;

          vec_tmp[0] = cross(tmp[0], tmp[1]);
          vec_tmp[1] = cross(tmp[0], tmp[2]);
          vec_tmp[2] = cross(tmp[1], tmp[2]);

          float len1 = dot (vec_tmp[0], vec_tmp[0]);
          float len2 = dot (vec_tmp[1], vec_tmp[1]);
          float len3 = dot (vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            evecs[2] = vec_tmp[0] * rsqrtf (len1);
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            evecs[2] = vec_tmp[1] * rsqrtf (len2);
          }
          else
          {
            evecs[2] = vec_tmp[2] * rsqrtf (len3);
          }

          evecs[1] = unitOrthogonal(evecs[2]);
          evecs[0] = cross(evecs[1], evecs[2]);
        }
        else if (evals.z - evals.y <= numeric_limits<float>::epsilon())
        {
          // second and third eigenvalues are equal
          tmp[0] = row0();  tmp[1] = row1();  tmp[2] = row2();
          tmp[0].x -= evals.x;  tmp[1].y -= evals.x;  tmp[2].z -= evals.x;

          vec_tmp[0] = cross(tmp[0], tmp[1]);
          vec_tmp[1] = cross(tmp[0], tmp[2]);
          vec_tmp[2] = cross(tmp[1], tmp[2]);

          float len1 = dot(vec_tmp[0], vec_tmp[0]);
          float len2 = dot(vec_tmp[1], vec_tmp[1]);
          float len3 = dot(vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            evecs[0] = vec_tmp[0] * rsqrtf(len1);
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            evecs[0] = vec_tmp[1] * rsqrtf(len2);
          }
          else
          {
            evecs[0] = vec_tmp[2] * rsqrtf(len3);
          }

          evecs[1] = unitOrthogonal( evecs[0] );
          evecs[2] = cross(evecs[0], evecs[1]);
        }
        else
        {
          // all three eigenvalues are distinct
          tmp[0] = row0();  tmp[1] = row1();  tmp[2] = row2();
          tmp[0].x -= evals.z;  tmp[1].y -= evals.z;  tmp[2].z -= evals.z;

          vec_tmp[0] = cross(tmp[0], tmp[1]);
          vec_tmp[1] = cross(tmp[0], tmp[2]);
          vec_tmp[2] = cross(tmp[1], tmp[2]);

          float len1 = dot(vec_tmp[0], vec_tmp[0]);
          float len2 = dot(vec_tmp[1], vec_tmp[1]);
          float len3 = dot(vec_tmp[2], vec_tmp[2]);

          float mmax[3];

          unsigned int min_el = 2;
          unsigned int max_el = 2;
          if (len1 >= len2 && len1 >= len3)
          {
            mmax[2] = len1;
            evecs[2] = vec_tmp[0] * rsqrtf (len1);
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            mmax[2] = len2;
            evecs[2] = vec_tmp[1] * rsqrtf (len2);
          }
          else
          {
            mmax[2] = len3;
            evecs[2] = vec_tmp[2] * rsqrtf (len3);
          }

          tmp[0] = row0();  tmp[1] = row1();  tmp[2] = row2();
          tmp[0].x -= evals.y;  tmp[1].y -= evals.y;  tmp[2].z -= evals.y;

          vec_tmp[0] = cross(tmp[0], tmp[1]);
          vec_tmp[1] = cross(tmp[0], tmp[2]);
          vec_tmp[2] = cross(tmp[1], tmp[2]);

          len1 = dot(vec_tmp[0], vec_tmp[0]);
          len2 = dot(vec_tmp[1], vec_tmp[1]);
          len3 = dot(vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            mmax[1] = len1;
            evecs[1] = vec_tmp[0] * rsqrtf (len1);
            min_el = len1 <= mmax[min_el] ? 1 : min_el;
            max_el = len1 >  mmax[max_el] ? 1 : max_el;
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            mmax[1] = len2;
            evecs[1] = vec_tmp[1] * rsqrtf (len2);
            min_el = len2 <= mmax[min_el] ? 1 : min_el;
            max_el = len2 >  mmax[max_el] ? 1 : max_el;
          }
          else
          {
            mmax[1] = len3;
            evecs[1] = vec_tmp[2] * rsqrtf (len3);
            min_el = len3 <= mmax[min_el] ? 1 : min_el;
            max_el = len3 >  mmax[max_el] ? 1 : max_el;
          }

          tmp[0] = row0();  tmp[1] = row1();  tmp[2] = row2();
          tmp[0].x -= evals.x;  tmp[1].y -= evals.x;  tmp[2].z -= evals.x;

          vec_tmp[0] = cross(tmp[0], tmp[1]);
          vec_tmp[1] = cross(tmp[0], tmp[2]);
          vec_tmp[2] = cross(tmp[1], tmp[2]);

          len1 = dot (vec_tmp[0], vec_tmp[0]);
          len2 = dot (vec_tmp[1], vec_tmp[1]);
          len3 = dot (vec_tmp[2], vec_tmp[2]);

          if (len1 >= len2 && len1 >= len3)
          {
            mmax[0] = len1;
            evecs[0] = vec_tmp[0] * rsqrtf (len1);
            min_el = len1 <= mmax[min_el] ? 0 : min_el;
            max_el = len1 >  mmax[max_el] ? 0 : max_el;
          }
          else if (len2 >= len1 && len2 >= len3)
          {
            mmax[0] = len2;
            evecs[0] = vec_tmp[1] * rsqrtf (len2);
            min_el = len2 <= mmax[min_el] ? 0 : min_el;
            max_el = len2 >  mmax[max_el] ? 0 : max_el;
          }
          else
          {
            mmax[0] = len3;
            evecs[0] = vec_tmp[2] * rsqrtf (len3);
            min_el = len3 <= mmax[min_el] ? 0 : min_el;
            max_el = len3 >  mmax[max_el] ? 0 : max_el;
          }

          unsigned mid_el = 3 - min_el - max_el;
          evecs[min_el] = normalized( cross( evecs[(min_el+1) % 3], evecs[(min_el+2) % 3] ) );
          evecs[mid_el] = normalized( cross( evecs[(mid_el+1) % 3], evecs[(mid_el+2) % 3] ) );
        }
        // Rescale back to the original size.
        evals *= scale;
      }
    private:
      volatile float* mat_pkg;

      __device__ __forceinline__ float m00() const { return mat_pkg[0]; }
      __device__ __forceinline__ float m01() const { return mat_pkg[1]; }
      __device__ __forceinline__ float m02() const { return mat_pkg[2]; }
      __device__ __forceinline__ float m10() const { return mat_pkg[1]; }
      __device__ __forceinline__ float m11() const { return mat_pkg[3]; }
      __device__ __forceinline__ float m12() const { return mat_pkg[4]; }
      __device__ __forceinline__ float m20() const { return mat_pkg[2]; }
      __device__ __forceinline__ float m21() const { return mat_pkg[4]; }
      __device__ __forceinline__ float m22() const { return mat_pkg[5]; }

      __device__ __forceinline__ float3 row0() const { return make_float3( m00(), m01(), m02() ); }
      __device__ __forceinline__ float3 row1() const { return make_float3( m10(), m11(), m12() ); }
      __device__ __forceinline__ float3 row2() const { return make_float3( m20(), m21(), m22() ); }

      __device__ __forceinline__ static bool isMuchSmallerThan (float x, float y)
      {
        // copied from <eigen>/include/Eigen/src/Core/NumTraits.h
        float prec_sqr = numeric_limits<float>::epsilon() * numeric_limits<float>::epsilon();
        return x * x <= prec_sqr * y * y;
      }
    };
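
    // Usage sketch (illustrative, not part of the original header): extracting
    // the eigenvector of the smallest eigenvalue of a symmetric 3x3 covariance
    // matrix, as done for surface-normal estimation. `cov` packs the upper
    // triangle {m00, m01, m02, m11, m12, m22}; `smallestEigenVector` is a
    // hypothetical name. Note that compute() rescales `cov` in place.
    __device__ __forceinline__ float3
    smallestEigenVector (volatile float* cov)
    {
      Eigen33::Mat33 tmp, vec_tmp, evecs;
      float3 evals;

      Eigen33 eigen33 (cov);
      eigen33.compute (tmp, vec_tmp, evecs, evals);
      return evecs[0];  // evals are sorted in increasing order
    }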

    struct Block
    {
      static __device__ __forceinline__ unsigned int stride()
      {
        return blockDim.x * blockDim.y * blockDim.z;
      }

      static __device__ __forceinline__ int
      flattenedThreadId()
      {
        return threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
      }

      template<int CTA_SIZE, typename T, class BinOp>
      static __device__ __forceinline__ void reduce(volatile T* buffer, BinOp op)
      {
        int tid = flattenedThreadId();
        T val = buffer[tid];

        if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
        if (CTA_SIZE >=  512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
        if (CTA_SIZE >=  256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
        if (CTA_SIZE >=  128) { if (tid <  64) buffer[tid] = val = op(val, buffer[tid +  64]); __syncthreads(); }

        if (tid < 32)
        {
          if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
          if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
          if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid +  8]); }
          if (CTA_SIZE >=  8) { buffer[tid] = val = op(val, buffer[tid +  4]); }
          if (CTA_SIZE >=  4) { buffer[tid] = val = op(val, buffer[tid +  2]); }
          if (CTA_SIZE >=  2) { buffer[tid] = val = op(val, buffer[tid +  1]); }
        }
      }

      template<int CTA_SIZE, typename T, class BinOp>
      static __device__ __forceinline__ T reduce(volatile T* buffer, T init, BinOp op)
      {
        int tid = flattenedThreadId();
        T val = buffer[tid] = init;
        __syncthreads();

        if (CTA_SIZE >= 1024) { if (tid < 512) buffer[tid] = val = op(val, buffer[tid + 512]); __syncthreads(); }
        if (CTA_SIZE >=  512) { if (tid < 256) buffer[tid] = val = op(val, buffer[tid + 256]); __syncthreads(); }
        if (CTA_SIZE >=  256) { if (tid < 128) buffer[tid] = val = op(val, buffer[tid + 128]); __syncthreads(); }
        if (CTA_SIZE >=  128) { if (tid <  64) buffer[tid] = val = op(val, buffer[tid +  64]); __syncthreads(); }

        if (tid < 32)
        {
          if (CTA_SIZE >= 64) { buffer[tid] = val = op(val, buffer[tid + 32]); }
          if (CTA_SIZE >= 32) { buffer[tid] = val = op(val, buffer[tid + 16]); }
          if (CTA_SIZE >= 16) { buffer[tid] = val = op(val, buffer[tid +  8]); }
          if (CTA_SIZE >=  8) { buffer[tid] = val = op(val, buffer[tid +  4]); }
          if (CTA_SIZE >=  4) { buffer[tid] = val = op(val, buffer[tid +  2]); }
          if (CTA_SIZE >=  2) { buffer[tid] = val = op(val, buffer[tid +  1]); }
        }
        __syncthreads();
        return buffer[0];
      }
    };
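
    // Usage sketch (illustrative, not part of the original header): a
    // block-wide sum over one float per thread in a 256-thread CTA. `SumOp`
    // and `blockSum` are hypothetical names; `shared_buf` must point to a
    // __shared__ array of at least 256 floats. The init-taking overload of
    // Block::reduce stores each thread's value and synchronizes internally.
    struct SumOp
    {
      __device__ __forceinline__ float operator()(float a, float b) const { return a + b; }
    };

    __device__ __forceinline__ float
    blockSum (volatile float* shared_buf, float my_val)
    {
      return Block::reduce<256>(shared_buf, my_val, SumOp());
    }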

    struct Warp
    {
      enum
      {
        LOG_WARP_SIZE = 5,
        WARP_SIZE     = 1 << LOG_WARP_SIZE,
        STRIDE        = WARP_SIZE
      };

      /** \brief Returns the warp lane ID of the calling thread. */
      static __device__ __forceinline__ unsigned int
      laneId()
      {
        unsigned int ret;
        asm("mov.u32 %0, %laneid;" : "=r"(ret) );
        return ret;
      }

      static __device__ __forceinline__ unsigned int id()
      {
        int tid = threadIdx.z * blockDim.x * blockDim.y + threadIdx.y * blockDim.x + threadIdx.x;
        return tid >> LOG_WARP_SIZE;
      }

      static __device__ __forceinline__ int
      laneMaskLt()
      {
#if (__CUDA_ARCH__ >= 200)
        unsigned int ret;
        asm("mov.u32 %0, %lanemask_lt;" : "=r"(ret) );
        return ret;
#else
        return 0xFFFFFFFF >> (32 - laneId());
#endif
      }

      static __device__ __forceinline__ int binaryExclScan(int ballot_mask)
      {
        return __popc(Warp::laneMaskLt() & ballot_mask);
      }
    };
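
    // Usage sketch (illustrative, not part of the original header): warp-level
    // stream compaction on compute capability >= 2.0, where __ballot is
    // available. Each lane whose predicate is set obtains the count of set
    // lanes below it as its output slot; `laneOffset` is a hypothetical name.
    __device__ __forceinline__ int
    laneOffset (bool has_output)
    {
      int ballot_mask = __ballot (has_output ? 1 : 0);
      return Warp::binaryExclScan (ballot_mask);
    }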

    struct Emulation
    {
      static __device__ __forceinline__ int
      warp_reduce ( volatile int *ptr , const unsigned int tid)
      {
        const unsigned int lane = tid & 31; // index of thread in warp (0..31)

        if (lane < 16)
        {
          int partial = ptr[tid];

          ptr[tid] = partial = partial + ptr[tid + 16];
          ptr[tid] = partial = partial + ptr[tid +  8];
          ptr[tid] = partial = partial + ptr[tid +  4];
          ptr[tid] = partial = partial + ptr[tid +  2];
          ptr[tid] = partial = partial + ptr[tid +  1];
        }
        return ptr[tid - lane];
      }

      static __forceinline__ __device__ int
      Ballot(int predicate, volatile int* cta_buffer)
      {
#if __CUDA_ARCH__ >= 200
        (void)cta_buffer;
        return __ballot(predicate);
#else
        int tid = Block::flattenedThreadId();
        cta_buffer[tid] = predicate ? (1 << (tid & 31)) : 0;
        return warp_reduce(cta_buffer, tid);
#endif
      }

      static __forceinline__ __device__ bool
      All(int predicate, volatile int* cta_buffer)
      {
#if __CUDA_ARCH__ >= 200
        (void)cta_buffer;
        return __all(predicate);
#else
        int tid = Block::flattenedThreadId();
        cta_buffer[tid] = predicate ? 1 : 0;
        return warp_reduce(cta_buffer, tid) == 32;
#endif
      }
    };
  }
}

#endif /* PCL_GPU_KINFU_CUDA_UTILS_HPP_ */