#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1

#include <parallel/types.h>
#include <parallel/base.h>

#if defined(__SUNPRO_CC) && defined(__sparc)
#include <sys/atomic.h>
#endif

#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif

#ifdef __MINGW32__
// Declare Sleep() directly rather than dragging in all of <windows.h>.
__attribute((dllimport)) void __attribute__((stdcall)) Sleep(unsigned long);
#endif
// Inline-assembly fetch-and-add helpers (x86 lock xadd).
template<typename _MustBeInt = int>
  int32_t
  __faa32(int32_t* __x, int32_t __inc)
  {
    asm volatile("lock xadd %0,%1"
                 : "=r" (__inc), "+m" (*__x)
                 : "0" (__inc)
                 : "memory");
    return __inc;
  }
template<typename _MustBeInt = int>
  int64_t
  __faa64(int64_t* __x, int64_t __inc)
  {
    asm volatile("lock xadd %0,%1"
                 : "=r" (__inc), "+m" (*__x)
                 : "0" (__inc)
                 : "memory");
    return __inc;
  }
/** @brief Add a value to a variable, atomically (32-bit). */
inline int32_t
__fetch_and_add_32(volatile int32_t* __ptr, int32_t __addend)
{
#if defined(__ICC)      //x86 version
  return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
  return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
  return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(__ptr),
                                 __addend);
#elif defined(__GNUC__)
  return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__SUNPRO_CC) && defined(__sparc)
  // Emulate fetch-and-add with a compare-and-swap retry loop.
  volatile int32_t __before, __after;
  do
    {
      __before = *__ptr;
      __after = __before + __addend;
    }
  while (atomic_cas_32((volatile unsigned int*)__ptr, __before,
                       __after) != __before);
  return __before;
#else                   //fallback, slow
#pragma message("slow __fetch_and_add_32")
  int32_t __res;
#pragma omp critical
  {
    __res = *__ptr;
    *(__ptr) += __addend;
  }
  return __res;
#endif
}
/** @brief Add a value to a variable, atomically (64-bit). */
inline int64_t
__fetch_and_add_64(volatile int64_t* __ptr, int64_t __addend)
{
#if defined(__ICC) && defined(__x86_64) //x86 version
  return __faa64<int>((int64_t*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
  return _InterlockedExchangeAdd64((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
  _GLIBCXX_PARALLEL_ASSERT(false);      // not available on 32-bit Windows
  return 0;
#else
  return _InterlockedExchangeAdd64(__ptr, __addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
  return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon)  \
   || defined(__k8) || defined(__core2))
  return __atomic_fetch_add(__ptr, __addend, __ATOMIC_ACQ_REL);
#elif defined(__SUNPRO_CC) && defined(__sparc)
  // Emulate fetch-and-add with a compare-and-swap retry loop.
  volatile int64_t __before, __after;
  do
    {
      __before = *__ptr;
      __after = __before + __addend;
    }
  while (atomic_cas_64((volatile unsigned long long*)__ptr, __before,
                       __after) != __before);
  return __before;
#else                   //fallback, slow
#if defined(__GNUC__) && defined(__i386)
  //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __fetch_and_add_64")
  int64_t __res;
#pragma omp critical
  {
    __res = *__ptr;
    *(__ptr) += __addend;
  }
  return __res;
#endif
}
/** @brief Add a value to a variable, atomically (dispatches on sizeof(_Tp)). */
template<typename _Tp>
  inline _Tp
  __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
  {
    if (sizeof(_Tp) == sizeof(int32_t))
      return (_Tp)__fetch_and_add_32((volatile int32_t*) __ptr,
                                     (int32_t)__addend);
    else if (sizeof(_Tp) == sizeof(int64_t))
      return (_Tp)__fetch_and_add_64((volatile int64_t*) __ptr,
                                     (int64_t)__addend);
    else
      _GLIBCXX_PARALLEL_ASSERT(false);
  }
// Inline-assembly compare-and-swap helpers (x86 lock cmpxchg).
template<typename _MustBeInt = int>
  inline int32_t
  __cas32(volatile int32_t* __ptr, int32_t __old, int32_t __nw)
  {
    int32_t __before;
    __asm__ __volatile__("lock; cmpxchgl %1,%2"
                         : "=a" (__before)
                         : "q" (__nw), "m" (*__ptr), "0" (__old)
                         : "memory");
    return __before;
  }
#if defined(__x86_64)
template<typename _MustBeInt = int>
  inline int64_t
  __cas64(volatile int64_t* __ptr, int64_t __old, int64_t __nw)
  {
    int64_t __before;
    __asm__ __volatile__("lock; cmpxchgq %1,%2"
                         : "=a" (__before)
                         : "q" (__nw), "m" (*__ptr), "0" (__old)
                         : "memory");
    return __before;
  }
#endif
/** @brief Compare *__ptr with __comparand; if equal, store __replacement
 *  and return true, otherwise return false (32-bit). */
inline bool
__compare_and_swap_32(volatile int32_t* __ptr, int32_t __comparand,
                      int32_t __replacement)
{
#if defined(__ICC)      //x86 version
  return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                     __comparand) == __comparand;
#elif defined(__ECC)    //IA-64 version
  return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                     __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
  return _InterlockedCompareExchange(reinterpret_cast<volatile long*>(__ptr),
                                     __replacement, __comparand)
           == __comparand;
#elif defined(__GNUC__)
  return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                     false, __ATOMIC_ACQ_REL,
                                     __ATOMIC_RELAXED);
#elif defined(__SUNPRO_CC) && defined(__sparc)
  return atomic_cas_32((volatile unsigned int*)__ptr, __comparand,
                       __replacement) == __comparand;
#else                   //fallback, slow
#pragma message("slow __compare_and_swap_32")
  bool __res = false;
#pragma omp critical
  {
    if (*__ptr == __comparand)
      {
        *__ptr = __replacement;
        __res = true;
      }
  }
  return __res;
#endif
}
/** @brief Compare *__ptr with __comparand; if equal, store __replacement
 *  and return true, otherwise return false (64-bit). */
inline bool
__compare_and_swap_64(volatile int64_t* __ptr, int64_t __comparand,
                      int64_t __replacement)
{
#if defined(__ICC) && defined(__x86_64) //x86 version
  return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
#elif defined(__ECC)    //IA-64 version
  return _InterlockedCompareExchange64((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
  _GLIBCXX_PARALLEL_ASSERT(false);      // not available on 32-bit Windows
  return false;
#else
  return _InterlockedCompareExchange64(__ptr, __replacement,
                                       __comparand) == __comparand;
#endif
#elif defined(__GNUC__) && defined(__x86_64)
  return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                     false, __ATOMIC_ACQ_REL,
                                     __ATOMIC_RELAXED);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon)  \
   || defined(__k8) || defined(__core2))
  return __atomic_compare_exchange_n(__ptr, &__comparand, __replacement,
                                     false, __ATOMIC_ACQ_REL,
                                     __ATOMIC_RELAXED);
#elif defined(__SUNPRO_CC) && defined(__sparc)
  return atomic_cas_64((volatile unsigned long long*)__ptr,
                       __comparand, __replacement) == __comparand;
#else                   //fallback, slow
#if defined(__GNUC__) && defined(__i386)
  //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __compare_and_swap_64")
  bool __res = false;
#pragma omp critical
  {
    if (*__ptr == __comparand)
      {
        *__ptr = __replacement;
        __res = true;
      }
  }
  return __res;
#endif
}
/** @brief Generic compare-and-swap, dispatching on sizeof(_Tp). */
template<typename _Tp>
  inline bool
  __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
  {
    if (sizeof(_Tp) == sizeof(int32_t))
      return __compare_and_swap_32((volatile int32_t*) __ptr,
                                   (int32_t)__comparand,
                                   (int32_t)__replacement);
    else if (sizeof(_Tp) == sizeof(int64_t))
      return __compare_and_swap_64((volatile int64_t*) __ptr,
                                   (int64_t)__comparand,
                                   (int64_t)__replacement);
    else
      _GLIBCXX_PARALLEL_ASSERT(false);
  }

/** @brief Yield control to another thread, without waiting for the end
 *  of the time slice. */
inline void
__yield()
{
#if defined (_WIN32) && !defined (__CYGWIN__)
  Sleep(0);
#else
  sched_yield();
#endif
}

#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */
This header is the compatibility layer of the GNU parallel extension to the Standard C++ Library, mostly concerned with atomic operations. It defines the following helpers in namespace __gnu_parallel; usage sketches follow the list.
int64_t __fetch_and_add_64(volatile int64_t *__ptr, int64_t __addend)
Add a value to a variable, atomically.
bool __compare_and_swap(volatile _Tp *__ptr, _Tp __comparand, _Tp __replacement)
Compare *__ptr and __comparand; if they are equal, set *__ptr = __replacement and return true, otherwise return false.
bool __compare_and_swap_32(volatile int32_t *__ptr, int32_t __comparand, int32_t __replacement)
Compare *__ptr and __comparand; if they are equal, set *__ptr = __replacement and return true, otherwise return false.
_Tp __fetch_and_add(volatile _Tp *__ptr, _Tp __addend)
Add a value to a variable, atomically.
bool __compare_and_swap_64(volatile int64_t *__ptr, int64_t __comparand, int64_t __replacement)
Compare *__ptr and __comparand; if they are equal, set *__ptr = __replacement and return true, otherwise return false.
int32_t __fetch_and_add_32(volatile int32_t *__ptr, int32_t __addend)
Add a value to a variable, atomically.
void __yield()
Yield control to another thread, without waiting for the end of the time slice (see the spin-lock sketch below).
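As a concrete illustration of the semantics described above, the following minimal sketch exercises the 32-bit helpers. It is not part of the library: the header is an internal libstdc++ detail (ordinary user code should prefer <atomic>), it assumes a GCC build where <parallel/compatibility.h> can be included directly, and the variable names are invented for the example.

#include <parallel/compatibility.h>
#include <cstdint>
#include <iostream>

int main()
{
  volatile int32_t counter = 0;

  // __fetch_and_add_32 adds atomically and returns the value held
  // *before* the addition.
  int32_t before = __gnu_parallel::__fetch_and_add_32(&counter, 5);

  // __compare_and_swap_32 stores the replacement only when the current
  // value equals the comparand, and reports whether it did so.
  bool swapped = __gnu_parallel::__compare_and_swap_32(&counter, 5, 42);
  bool missed  = __gnu_parallel::__compare_and_swap_32(&counter, 5, 99);

  std::cout << before << ' ' << swapped << ' ' << missed
            << ' ' << counter << '\n';   // prints: 0 1 0 42
}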
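__yield() exists for exactly this kind of retry loop: when a compare-and-swap fails, the caller gives up the rest of its time slice instead of spinning. The test-and-set lock below is a hypothetical sketch (not part of the library) showing how the two primitives compose; production code would use std::mutex or std::atomic_flag instead.

#include <parallel/compatibility.h>
#include <cstdint>

struct SpinLock
{
  volatile int32_t flag_ = 0;                 // 0 = unlocked, 1 = locked

  void lock()
  {
    // Try to flip 0 -> 1; while another thread holds the lock,
    // yield so the holder can run and release it.
    while (!__gnu_parallel::__compare_and_swap_32(&flag_, 0, 1))
      __gnu_parallel::__yield();
  }

  void unlock()
  {
    // Swap 1 back to 0; using the CAS keeps the release atomic as well.
    __gnu_parallel::__compare_and_swap_32(&flag_, 1, 0);
  }
};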