#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"
#include "tsan_annotations.h"

#include <sys/syscall.h>

void __kmp_validate_locks(void) {
  int i;
  kmp_uint32 x, y;

  /* Check to make sure unsigned arithmetic wraps properly */
  x = ~((kmp_uint32)0) - 2;
  y = x - 2;

  for (i = 0; i < 8; ++i, ++x, ++y) {
    kmp_uint32 z = (x - y);
    KMP_ASSERT(z == 2);
  }

  KMP_ASSERT(offsetof(kmp_base_queuing_lock, tail_id) % 8 == 0);
}
/* ------------------------------------------------------------------------ */
/* test and set locks */

static kmp_int32 __kmp_get_tas_lock_owner(kmp_tas_lock_t *lck) {
  return KMP_LOCK_STRIP(KMP_ATOMIC_LD_RLX(&lck->lk.poll)) - 1;
}

static inline bool __kmp_is_tas_lock_nestable(kmp_tas_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}

__forceinline static int
__kmp_acquire_tas_lock_timed_template(kmp_tas_lock_t *lck, kmp_int32 gtid) {
#ifdef USE_LOCK_PROFILE
  kmp_uint32 curr = KMP_LOCK_STRIP(lck->lk.poll);
  if ((curr != 0) && (curr != gtid + 1))
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  kmp_int32 tas_free = KMP_LOCK_FREE(tas);
  kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);

  // Fast path: lock looks free, try one acquire-CAS.
  if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
      __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    KMP_FSYNC_ACQUIRED(lck);
    return KMP_LOCK_ACQUIRED_FIRST;
  }

  kmp_uint32 spins;
  KMP_FSYNC_PREPARE(lck);
  KMP_INIT_YIELD(spins);
  if (TCR_4(__kmp_nth) > (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
    KMP_YIELD(TRUE);
  } else {
    KMP_YIELD_SPIN(spins);
  }

  kmp_backoff_t backoff = __kmp_spin_backoff_params;
  while (KMP_ATOMIC_LD_RLX(&lck->lk.poll) != tas_free ||
         !__kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    __kmp_spin_backoff(&backoff);
    if (TCR_4(__kmp_nth) >
        (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
      KMP_YIELD(TRUE);
    } else {
      KMP_YIELD_SPIN(spins);
    }
  }
  KMP_FSYNC_ACQUIRED(lck);
  return KMP_LOCK_ACQUIRED_FIRST;
}
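
// A minimal sketch (not part of the runtime) of the "test, then test-and-set"
// pattern the template above implements, expressed with plain C++11 atomics.
// The type and member names here (SimpleTasLock, poll) are illustrative only,
// assuming the same encoding: 0 == free, gtid + 1 == busy.
//
//   #include <atomic>
//   struct SimpleTasLock {
//     std::atomic<int> poll{0};
//     void lock(int gtid) {
//       int expected = 0;
//       // Cheap relaxed read first, then an acquire CAS, as above.
//       while (poll.load(std::memory_order_relaxed) != 0 ||
//              !poll.compare_exchange_weak(expected, gtid + 1,
//                                          std::memory_order_acquire,
//                                          std::memory_order_relaxed)) {
//         expected = 0; // a failed CAS overwrote the expected value
//       }
//     }
//     void unlock() { poll.store(0, std::memory_order_release); }
//   };
//
// A real implementation also yields/backs off in the spin loop, which is what
// KMP_YIELD_SPIN and __kmp_spin_backoff do above.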
int __kmp_acquire_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_tas_lock_timed_template(lck, gtid);
  ANNOTATE_TAS_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }
  return __kmp_acquire_tas_lock(lck, gtid);
}

int __kmp_test_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  kmp_int32 tas_free = KMP_LOCK_FREE(tas);
  kmp_int32 tas_busy = KMP_LOCK_BUSY(gtid + 1, tas);
  if (KMP_ATOMIC_LD_RLX(&lck->lk.poll) == tas_free &&
      __kmp_atomic_compare_store_acq(&lck->lk.poll, tas_free, tas_busy)) {
    KMP_FSYNC_ACQUIRED(lck);
    return TRUE;
  }
  return FALSE;
}

static int __kmp_test_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                           kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  return __kmp_test_tas_lock(lck, gtid);
}

int __kmp_release_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_TAS_RELEASED(lck);
  KMP_ATOMIC_ST_REL(&lck->lk.poll, KMP_LOCK_FREE(tas));

  KMP_YIELD(TCR_4(__kmp_nth) >
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_tas_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_tas_lock_owner(lck) >= 0) &&
      (__kmp_get_tas_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_tas_lock(lck, gtid);
}

void __kmp_init_tas_lock(kmp_tas_lock_t *lck) {
  lck->lk.poll = KMP_LOCK_FREE(tas);
}

void __kmp_destroy_tas_lock(kmp_tas_lock_t *lck) { lck->lk.poll = 0; }

static void __kmp_destroy_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if ((sizeof(kmp_tas_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_tas_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_tas_lock(lck);
}
// nested test and set locks

int __kmp_acquire_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_tas_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_tas_lock_timed_template(lck, gtid);
    ANNOTATE_TAS_ACQUIRED(lck);
    lck->lk.depth_locked = 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int __kmp_acquire_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_tas_lock(lck, gtid);
}

int __kmp_test_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_tas_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_tas_lock(lck, gtid)) {
    retval = 0;
  } else {
    retval = lck->lk.depth_locked = 1;
  }
  return retval;
}

static int __kmp_test_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_tas_lock(lck, gtid);
}

int __kmp_release_nested_tas_lock(kmp_tas_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_tas_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_tas_lock_with_checks(kmp_tas_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_tas_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_tas_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_tas_lock(lck, gtid);
}

void __kmp_init_nested_tas_lock(kmp_tas_lock_t *lck) {
  __kmp_init_tas_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_tas_lock(kmp_tas_lock_t *lck) {
  __kmp_destroy_tas_lock(lck);
  lck->lk.depth_locked = 0;
}

static void __kmp_destroy_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_tas_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_tas_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_tas_lock(lck);
}
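
// The nested ("nest lock") variants above layer re-entrancy on top of the
// plain lock: depth_locked is -1 for simple locks and counts recursive
// acquisitions for nestable ones, and only the full release unlocks the
// underlying lock. A minimal sketch of that idea with illustrative names
// (RecursiveTas), not the runtime's API:
//
//   #include <atomic>
//   struct RecursiveTas {
//     std::atomic<int> poll{0}; // 0 free, gtid + 1 held
//     int depth = 0;            // touched only by the current owner
//     void lock(int gtid) {
//       if (poll.load(std::memory_order_relaxed) == gtid + 1) {
//         ++depth; // already the owner: just bump the nesting count
//         return;
//       }
//       int expected = 0;
//       while (!poll.compare_exchange_weak(expected, gtid + 1,
//                                          std::memory_order_acquire,
//                                          std::memory_order_relaxed))
//         expected = 0;
//       depth = 1;
//     }
//     bool unlock() { // returns true when the lock is fully released
//       if (--depth > 0)
//         return false;
//       poll.store(0, std::memory_order_release);
//       return true;
//     }
//   };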
#if KMP_USE_FUTEX

/* ------------------------------------------------------------------------ */
/* futex locks */

static kmp_int32 __kmp_get_futex_lock_owner(kmp_futex_lock_t *lck) {
  return KMP_LOCK_STRIP((TCR_4(lck->lk.poll) >> 1)) - 1;
}

static inline bool __kmp_is_futex_lock_nestable(kmp_futex_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}

__forceinline static int
__kmp_acquire_futex_lock_timed_template(kmp_futex_lock_t *lck,
                                        kmp_int32 gtid) {
  kmp_int32 gtid_code = (gtid + 1) << 1;

#ifdef USE_LOCK_PROFILE
  kmp_uint32 curr = KMP_LOCK_STRIP(TCR_4(lck->lk.poll));
  if ((curr != 0) && (curr != gtid_code))
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  KMP_FSYNC_PREPARE(lck);
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));

  kmp_int32 poll_val;

  while ((poll_val = KMP_COMPARE_AND_STORE_RET32(
              &(lck->lk.poll), KMP_LOCK_FREE(futex),
              KMP_LOCK_BUSY(gtid_code, futex))) != KMP_LOCK_FREE(futex)) {

    kmp_int32 cond = KMP_LOCK_STRIP(poll_val) & 1;
    KA_TRACE(
        1000,
        ("__kmp_acquire_futex_lock: lck:%p, T#%d poll_val = 0x%x cond = 0x%x\n",
         lck, gtid, poll_val, cond));

    if (!cond) {
      // Try to set the low bit of poll to tell the owner that it must wake
      // this thread up on release.
      if (!KMP_COMPARE_AND_STORE_REL32(&(lck->lk.poll), poll_val,
                                       poll_val | KMP_LOCK_BUSY(1, futex))) {
        KA_TRACE(
            1000,
            ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d can't set bit 0\n",
             lck, lck->lk.poll, gtid));
        continue;
      }
      poll_val |= KMP_LOCK_BUSY(1, futex);

      KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d bit 0 set\n",
                      lck, lck->lk.poll, gtid));
    }

    KA_TRACE(
        1000,
        ("__kmp_acquire_futex_lock: lck:%p, T#%d before futex_wait(0x%x)\n",
         lck, gtid, poll_val));

    kmp_int32 rc;
    if ((rc = syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAIT, poll_val, NULL,
                      NULL, 0)) != 0) {
      KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p, T#%d futex_wait(0x%x) "
                      "failed (rc=%d errno=%d)\n",
                      lck, gtid, poll_val, rc, errno));
      continue;
    }

    KA_TRACE(1000,
             ("__kmp_acquire_futex_lock: lck:%p, T#%d after futex_wait(0x%x)\n",
              lck, gtid, poll_val));
    // This thread was entered on the OS futex queue, so the releasing thread
    // must perform a futex wake; record that in our own gtid code.
    gtid_code |= 1;
  }

  KMP_FSYNC_ACQUIRED(lck);
  KA_TRACE(1000, ("__kmp_acquire_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
                  lck->lk.poll, gtid));
  return KMP_LOCK_ACQUIRED_FIRST;
}
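
// The template above parks contended waiters in the kernel with FUTEX_WAIT and
// uses the low bit of poll as a "there are sleepers" flag that the release
// path checks before issuing FUTEX_WAKE. A stripped-down sketch of just the
// wait/wake system-call protocol (Linux only; the helper names futex_wait and
// futex_wake_one are illustrative, not part of the runtime):
//
//   #include <atomic>
//   #include <linux/futex.h>
//   #include <sys/syscall.h>
//   #include <unistd.h>
//
//   static void futex_wait(std::atomic<int> *addr, int expected) {
//     // Sleeps only if *addr still equals 'expected' when the kernel checks,
//     // so a wake that races with this call is never lost.
//     syscall(SYS_futex, reinterpret_cast<int *>(addr), FUTEX_WAIT, expected,
//             nullptr, nullptr, 0);
//   }
//   static void futex_wake_one(std::atomic<int> *addr) {
//     syscall(SYS_futex, reinterpret_cast<int *>(addr), FUTEX_WAKE, 1, nullptr,
//             nullptr, 0);
//   }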
int __kmp_acquire_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_futex_lock_timed_template(lck, gtid);
  ANNOTATE_FUTEX_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }
  return __kmp_acquire_futex_lock(lck, gtid);
}

int __kmp_test_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  if (KMP_COMPARE_AND_STORE_ACQ32(&(lck->lk.poll), KMP_LOCK_FREE(futex),
                                  KMP_LOCK_BUSY((gtid + 1) << 1, futex))) {
    KMP_FSYNC_ACQUIRED(lck);
    return TRUE;
  }
  return FALSE;
}

static int __kmp_test_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                             kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  return __kmp_test_futex_lock(lck, gtid);
}

int __kmp_release_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d entering\n",
                  lck, lck->lk.poll, gtid));

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_FUTEX_RELEASED(lck);

  kmp_int32 poll_val = KMP_XCHG_FIXED32(&(lck->lk.poll), KMP_LOCK_FREE(futex));

  KA_TRACE(1000,
           ("__kmp_release_futex_lock: lck:%p, T#%d released poll_val = 0x%x\n",
            lck, gtid, poll_val));

  if (KMP_LOCK_STRIP(poll_val) & 1) {
    KA_TRACE(1000,
             ("__kmp_release_futex_lock: lck:%p, T#%d futex_wake 1 thread\n",
              lck, gtid));
    syscall(__NR_futex, &(lck->lk.poll), FUTEX_WAKE, KMP_LOCK_BUSY(1, futex),
            NULL, NULL, 0);
  }

  KA_TRACE(1000, ("__kmp_release_futex_lock: lck:%p(0x%x), T#%d exiting\n", lck,
                  lck->lk.poll, gtid));

  KMP_YIELD(TCR_4(__kmp_nth) >
            (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_futex_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_futex_lock_owner(lck) >= 0) &&
      (__kmp_get_futex_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_futex_lock(lck, gtid);
}

void __kmp_init_futex_lock(kmp_futex_lock_t *lck) {
  TCW_4(lck->lk.poll, KMP_LOCK_FREE(futex));
}

void __kmp_destroy_futex_lock(kmp_futex_lock_t *lck) { lck->lk.poll = 0; }

static void __kmp_destroy_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if ((sizeof(kmp_futex_lock_t) <= OMP_LOCK_T_SIZE) &&
      __kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_futex_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_futex_lock(lck);
}
// nested futex locks

int __kmp_acquire_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_futex_lock_timed_template(lck, gtid);
    ANNOTATE_FUTEX_ACQUIRED(lck);
    lck->lk.depth_locked = 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int __kmp_acquire_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_futex_lock(lck, gtid);
}

int __kmp_test_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_futex_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_futex_lock(lck, gtid)) {
    retval = 0;
  } else {
    retval = lck->lk.depth_locked = 1;
  }
  return retval;
}

static int __kmp_test_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_futex_lock(lck, gtid);
}

int __kmp_release_nested_futex_lock(kmp_futex_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (--(lck->lk.depth_locked) == 0) {
    __kmp_release_futex_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_futex_lock_with_checks(kmp_futex_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_futex_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_futex_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_futex_lock(lck, gtid);
}

void __kmp_init_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_futex_lock(kmp_futex_lock_t *lck) {
  __kmp_destroy_futex_lock(lck);
  lck->lk.depth_locked = 0;
}

static void
__kmp_destroy_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (!__kmp_is_futex_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_futex_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_futex_lock(lck);
}
#endif // KMP_USE_FUTEX

/* ------------------------------------------------------------------------ */
/* ticket (bakery) locks */

static kmp_int32 __kmp_get_ticket_lock_owner(kmp_ticket_lock_t *lck) {
  return std::atomic_load_explicit(&lck->lk.owner_id,
                                   std::memory_order_relaxed) -
         1;
}

static inline bool __kmp_is_ticket_lock_nestable(kmp_ticket_lock_t *lck) {
  return std::atomic_load_explicit(&lck->lk.depth_locked,
                                   std::memory_order_relaxed) != -1;
}

static kmp_uint32 __kmp_bakery_check(void *now_serving, kmp_uint32 my_ticket) {
  return std::atomic_load_explicit((std::atomic<unsigned> *)now_serving,
                                   std::memory_order_acquire) == my_ticket;
}

__forceinline static int
__kmp_acquire_ticket_lock_timed_template(kmp_ticket_lock_t *lck,
                                         kmp_int32 gtid) {
  kmp_uint32 my_ticket = std::atomic_fetch_add_explicit(
      &lck->lk.next_ticket, 1U, std::memory_order_relaxed);

#ifdef USE_LOCK_PROFILE
  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) != my_ticket)
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_acquire) == my_ticket) {
    return KMP_LOCK_ACQUIRED_FIRST;
  }
  KMP_WAIT_YIELD_PTR(&lck->lk.now_serving, my_ticket, __kmp_bakery_check, lck);
  return KMP_LOCK_ACQUIRED_FIRST;
}
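
// The acquire path above is a classic ticket (bakery) lock: fetch_add hands
// out tickets, and now_serving admits waiters in FIFO order. A compact,
// stand-alone sketch of the same algorithm with std::atomic; the type name
// TicketLock is illustrative, not the runtime's:
//
//   #include <atomic>
//   struct TicketLock {
//     std::atomic<unsigned> next_ticket{0};
//     std::atomic<unsigned> now_serving{0};
//     void lock() {
//       unsigned my = next_ticket.fetch_add(1, std::memory_order_relaxed);
//       while (now_serving.load(std::memory_order_acquire) != my) {
//         // spin; the runtime yields and backs off here (KMP_WAIT_YIELD_PTR)
//       }
//     }
//     void unlock() { now_serving.fetch_add(1, std::memory_order_release); }
//   };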
int __kmp_acquire_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_ticket_lock_timed_template(lck, gtid);
  ANNOTATE_TICKET_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                 kmp_int32 gtid) {
  char const *const func = "omp_set_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_ticket_lock(lck, gtid);

  std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                             std::memory_order_relaxed);
  return KMP_LOCK_ACQUIRED_FIRST;
}

int __kmp_test_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  kmp_uint32 my_ticket = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                   std::memory_order_relaxed);

  if (std::atomic_load_explicit(&lck->lk.now_serving,
                                std::memory_order_relaxed) == my_ticket) {
    kmp_uint32 next_ticket = my_ticket + 1;
    if (std::atomic_compare_exchange_strong_explicit(
            &lck->lk.next_ticket, &my_ticket, next_ticket,
            std::memory_order_acquire, std::memory_order_acquire)) {
      return TRUE;
    }
  }
  return FALSE;
}

static int __kmp_test_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_test_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }

  int retval = __kmp_test_ticket_lock(lck, gtid);

  if (retval) {
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
  }
  return retval;
}

int __kmp_release_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  kmp_uint32 distance = std::atomic_load_explicit(&lck->lk.next_ticket,
                                                  std::memory_order_relaxed) -
                        std::atomic_load_explicit(&lck->lk.now_serving,
                                                  std::memory_order_relaxed);

  ANNOTATE_TICKET_RELEASED(lck);
  std::atomic_fetch_add_explicit(&lck->lk.now_serving, 1U,
                                 std::memory_order_release);

  KMP_YIELD(distance >
            (kmp_uint32)(__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
  return KMP_LOCK_RELEASED;
}
static int __kmp_release_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                 kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if ((gtid >= 0) && (__kmp_get_ticket_lock_owner(lck) >= 0) &&
      (__kmp_get_ticket_lock_owner(lck) != gtid)) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  return __kmp_release_ticket_lock(lck, gtid);
}

void __kmp_init_ticket_lock(kmp_ticket_lock_t *lck) {
  lck->lk.location = NULL;
  lck->lk.self = lck;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(
      &lck->lk.owner_id, 0,
      std::memory_order_relaxed); // no thread owns the lock.
  std::atomic_store_explicit(
      &lck->lk.depth_locked, -1,
      std::memory_order_relaxed); // -1 => not a nested lock.
  std::atomic_store_explicit(&lck->lk.initialized, true,
                             std::memory_order_release);
}

void __kmp_destroy_ticket_lock(kmp_ticket_lock_t *lck) {
  std::atomic_store_explicit(&lck->lk.initialized, false,
                             std::memory_order_release);
  lck->lk.self = NULL;
  lck->lk.location = NULL;
  std::atomic_store_explicit(&lck->lk.next_ticket, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.now_serving, 0U,
                             std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
  std::atomic_store_explicit(&lck->lk.depth_locked, -1,
                             std::memory_order_relaxed);
}

static void __kmp_destroy_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  char const *const func = "omp_destroy_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_ticket_lock(lck);
}
// nested ticket locks

int __kmp_acquire_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_ticket_lock_owner(lck) == gtid) {
    std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
                                   std::memory_order_relaxed);
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_ticket_lock_timed_template(lck, gtid);
    ANNOTATE_TICKET_ACQUIRED(lck);
    std::atomic_store_explicit(&lck->lk.depth_locked, 1,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int __kmp_acquire_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                        kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_ticket_lock(lck, gtid);
}

int __kmp_test_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_ticket_lock_owner(lck) == gtid) {
    retval = std::atomic_fetch_add_explicit(&lck->lk.depth_locked, 1,
                                            std::memory_order_relaxed) +
             1;
  } else if (!__kmp_test_ticket_lock(lck, gtid)) {
    retval = 0;
  } else {
    std::atomic_store_explicit(&lck->lk.depth_locked, 1,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&lck->lk.owner_id, gtid + 1,
                               std::memory_order_relaxed);
    retval = 1;
  }
  return retval;
}

static int __kmp_test_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                     kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_ticket_lock(lck, gtid);
}

int __kmp_release_nested_ticket_lock(kmp_ticket_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if ((std::atomic_fetch_add_explicit(&lck->lk.depth_locked, -1,
                                      std::memory_order_relaxed) -
       1) == 0) {
    std::atomic_store_explicit(&lck->lk.owner_id, 0, std::memory_order_relaxed);
    __kmp_release_ticket_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck,
                                                        kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_ticket_lock(lck, gtid);
}

void __kmp_init_nested_ticket_lock(kmp_ticket_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
                             std::memory_order_relaxed);
  // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_ticket_lock(kmp_ticket_lock_t *lck) {
  __kmp_destroy_ticket_lock(lck);
  std::atomic_store_explicit(&lck->lk.depth_locked, 0,
                             std::memory_order_relaxed);
}

static void
__kmp_destroy_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";

  if (!std::atomic_load_explicit(&lck->lk.initialized,
                                 std::memory_order_relaxed)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (lck->lk.self != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_ticket_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_ticket_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_ticket_lock(lck);
}
// access functions to fields which don't exist for all lock kinds.

static const ident_t *__kmp_get_ticket_lock_location(kmp_ticket_lock_t *lck) {
  return lck->lk.location;
}

static void __kmp_set_ticket_lock_location(kmp_ticket_lock_t *lck,
                                           const ident_t *loc) {
  lck->lk.location = loc;
}

static kmp_lock_flags_t __kmp_get_ticket_lock_flags(kmp_ticket_lock_t *lck) {
  return lck->lk.flags;
}

static void __kmp_set_ticket_lock_flags(kmp_ticket_lock_t *lck,
                                        kmp_lock_flags_t flags) {
  lck->lk.flags = flags;
}
/* ------------------------------------------------------------------------ */
/* queuing locks */

#ifdef DEBUG_QUEUING_LOCKS

/* Stuff for circular trace buffer */
#define TRACE_BUF_ELE 1024
static char traces[TRACE_BUF_ELE][128] = {0};
static kmp_uint32 tc = 0;
#define TRACE_LOCK(X, Y)                                                       \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s\n", X, Y);
#define TRACE_LOCK_T(X, Y, Z)                                                  \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s%d\n", X, Y, Z);
#define TRACE_LOCK_HT(X, Y, Z, Q)                                              \
  KMP_SNPRINTF(traces[tc++ % TRACE_BUF_ELE], 128, "t%d at %s %d,%d\n", X, Y,   \
               Z, Q);

static void __kmp_dump_queuing_lock(kmp_info_t *this_thr, kmp_int32 gtid,
                                    kmp_queuing_lock_t *lck, kmp_int32 head_id,
                                    kmp_int32 tail_id) {
  kmp_int32 t, i;

  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: TRACE BEGINS HERE! \n");

  i = tc % TRACE_BUF_ELE;
  __kmp_printf_no_lock("%s\n", traces[i]);
  i = (i + 1) % TRACE_BUF_ELE;
  while (i != (tc % TRACE_BUF_ELE)) {
    __kmp_printf_no_lock("%s", traces[i]);
    i = (i + 1) % TRACE_BUF_ELE;
  }
  __kmp_printf_no_lock("\n");

  __kmp_printf_no_lock("\n__kmp_dump_queuing_lock: gtid+1:%d, spin_here:%d, "
                       "next_wait:%d, head_id:%d, tail_id:%d\n",
                       gtid + 1, this_thr->th.th_spin_here,
                       this_thr->th.th_next_waiting, head_id, tail_id);

  __kmp_printf_no_lock("\t\thead: %d ", lck->lk.head_id);

  if (lck->lk.head_id >= 1) {
    t = __kmp_threads[lck->lk.head_id - 1]->th.th_next_waiting;
    while (t > 0) {
      __kmp_printf_no_lock("-> %d ", t);
      t = __kmp_threads[t - 1]->th.th_next_waiting;
    }
  }
  __kmp_printf_no_lock("; tail: %d ", lck->lk.tail_id);
  __kmp_printf_no_lock("\n\n");
}

#endif /* DEBUG_QUEUING_LOCKS */
static kmp_int32 __kmp_get_queuing_lock_owner(kmp_queuing_lock_t *lck) {
  return TCR_4(lck->lk.owner_id) - 1;
}

static inline bool __kmp_is_queuing_lock_nestable(kmp_queuing_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}

/* Acquire a lock using the queuing lock implementation */
template <bool takeTime>
__forceinline static int
__kmp_acquire_queuing_lock_timed_template(kmp_queuing_lock_t *lck,
                                          kmp_int32 gtid) {
  kmp_info_t *this_thr = __kmp_thread_from_gtid(gtid);
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;
  volatile kmp_uint32 *spin_here_p;
  kmp_int32 need_mf = 1;

#if OMPT_SUPPORT
  ompt_state_t prev_state = ompt_state_undefined;
#endif

  KA_TRACE(1000,
           ("__kmp_acquire_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));

  KMP_FSYNC_PREPARE(lck);
  KMP_DEBUG_ASSERT(this_thr != NULL);
  spin_here_p = &this_thr->th.th_spin_here;

#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "acq ent");
  if (*spin_here_p)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
  KMP_DEBUG_ASSERT(!*spin_here_p);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  // The store to spin_here_p must be visible before the cmpxchg on head_id_p
  // that may follow, so that a releasing thread cannot miss our wait flag.
  *spin_here_p = TRUE;

  while (1) {
    kmp_int32 enqueued;
    kmp_int32 head;
    kmp_int32 tail;

    head = *head_id_p;

    switch (head) {

    case -1: {
#ifdef DEBUG_QUEUING_LOCKS
      tail = *tail_id_p;
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
      tail = 0; // so the asynchronously-read next link is not used by accident
      need_mf = 0;
      /* try (-1,0)->(tid,tid) */
      enqueued = KMP_COMPARE_AND_STORE_ACQ64((volatile kmp_int64 *)tail_id_p,
                                             KMP_PACK_64(-1, 0),
                                             KMP_PACK_64(gtid + 1, gtid + 1));
#ifdef DEBUG_QUEUING_LOCKS
      if (enqueued)
        TRACE_LOCK(gtid + 1, "acq enq: (-1,0)->(tid,tid)");
#endif
    } break;

    default: {
      tail = *tail_id_p;
      KMP_DEBUG_ASSERT(tail != gtid + 1);

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif

      if (tail == 0) {
        enqueued = FALSE;
      } else {
        need_mf = 0;
        /* try (h,t) or (h,h)->(h,tid) */
        enqueued = KMP_COMPARE_AND_STORE_ACQ32(tail_id_p, tail, gtid + 1);

#ifdef DEBUG_QUEUING_LOCKS
        if (enqueued)
          TRACE_LOCK(gtid + 1, "acq enq: (h,t)->(h,tid)");
#endif
      }
    } break;

    case 0: /* empty queue */
    {
      kmp_int32 grabbed_lock;

#ifdef DEBUG_QUEUING_LOCKS
      tail = *tail_id_p;
      TRACE_LOCK_HT(gtid + 1, "acq read: ", head, tail);
#endif
      /* try (0,0)->(-1,0) */
      grabbed_lock = KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1);

      if (grabbed_lock) {
        *spin_here_p = FALSE;

        KA_TRACE(
            1000,
            ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: no queuing\n",
             lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK_HT(gtid + 1, "acq exit: ", head, 0);
#endif

#if OMPT_SUPPORT
        if (ompt_enabled.enabled && prev_state != ompt_state_undefined) {
          /* change the state before clearing wait_id */
          this_thr->th.ompt_thread_info.state = prev_state;
          this_thr->th.ompt_thread_info.wait_id = 0;
        }
#endif

        KMP_FSYNC_ACQUIRED(lck);
        return KMP_LOCK_ACQUIRED_FIRST; /* lock holder cannot be on queue */
      }
      enqueued = FALSE;
    } break;
    }

#if OMPT_SUPPORT
    if (ompt_enabled.enabled && prev_state == ompt_state_undefined) {
      /* this thread will spin; set wait_id before entering wait state */
      prev_state = this_thr->th.ompt_thread_info.state;
      this_thr->th.ompt_thread_info.wait_id = (uint64_t)lck;
      this_thr->th.ompt_thread_info.state = ompt_state_wait_lock;
    }
#endif

    if (enqueued) {
      if (tail > 0) {
        kmp_info_t *tail_thr = __kmp_thread_from_gtid(tail - 1);
        KMP_ASSERT(tail_thr != NULL);
        tail_thr->th.th_next_waiting = gtid + 1;
        /* corresponding wait for this write is in the release code */
      }
      KA_TRACE(1000,
               ("__kmp_acquire_queuing_lock: lck:%p, T#%d waiting for lock\n",
                lck, gtid));

      KMP_MB();
      KMP_WAIT_YIELD(spin_here_p, FALSE, KMP_EQ, lck);

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq spin");

      if (this_thr->th.th_next_waiting != 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
      KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
      KA_TRACE(1000, ("__kmp_acquire_queuing_lock: lck:%p, T#%d exiting: after "
                      "waiting on queue\n",
                      lck, gtid));

#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "acq exit 2");
#endif

#if OMPT_SUPPORT
      /* change the state before clearing wait_id */
      this_thr->th.ompt_thread_info.state = prev_state;
      this_thr->th.ompt_thread_info.wait_id = 0;
#endif

      /* got lock, we were dequeued by the thread that released lock */
      return KMP_LOCK_ACQUIRED_FIRST;
    }

    /* Yield if number of threads > number of logical processors */
    KMP_YIELD(TCR_4(__kmp_nth) >
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "acq retry");
#endif
  }
  KMP_ASSERT2(0, "should not get here");
  return KMP_LOCK_ACQUIRED_FIRST;
}
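
// The queuing lock above keeps an explicit FIFO of gtids in (head_id, tail_id)
// with the traced transitions (-1,0)->(tid,tid), (h,t)->(h,tid) and
// (0,0)->(-1,0), and each waiter spins only on its own th_spin_here flag. The
// same local-spinning idea in textbook form is the MCS list lock; a hedged,
// stand-alone sketch for comparison (node-pointer based rather than gtid
// indices; all names here are illustrative, not the runtime's):
//
//   #include <atomic>
//   struct McsNode {
//     std::atomic<McsNode *> next{nullptr};
//     std::atomic<bool> locked{false};
//   };
//   struct McsLock {
//     std::atomic<McsNode *> tail{nullptr};
//     void lock(McsNode *me) {
//       me->next.store(nullptr, std::memory_order_relaxed);
//       me->locked.store(true, std::memory_order_relaxed);
//       McsNode *prev = tail.exchange(me, std::memory_order_acq_rel);
//       if (prev != nullptr) {
//         prev->next.store(me, std::memory_order_release);
//         while (me->locked.load(std::memory_order_acquire)) {
//         } // spin on our own flag, like th_spin_here above
//       }
//     }
//     void unlock(McsNode *me) {
//       McsNode *succ = me->next.load(std::memory_order_acquire);
//       if (succ == nullptr) {
//         McsNode *expected = me;
//         if (tail.compare_exchange_strong(expected, nullptr,
//                                          std::memory_order_acq_rel))
//           return; // no successor: queue is empty again
//         while ((succ = me->next.load(std::memory_order_acquire)) == nullptr) {
//         } // successor exists but has not linked itself yet
//       }
//       succ->locked.store(false, std::memory_order_release);
//     }
//   };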
int __kmp_acquire_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  int retval = __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
  ANNOTATE_QUEUING_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_queuing_lock(lck, gtid);

  lck->lk.owner_id = gtid + 1;
  return KMP_LOCK_ACQUIRED_FIRST;
}

int __kmp_test_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  kmp_int32 head;
  kmp_info_t *this_thr;

  KA_TRACE(1000, ("__kmp_test_queuing_lock: T#%d entering\n", gtid));
  KMP_DEBUG_ASSERT(gtid >= 0);
  this_thr = __kmp_thread_from_gtid(gtid);
  KMP_DEBUG_ASSERT(this_thr != NULL);
  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);

  head = *head_id_p;

  if (head == 0) { /* nobody on queue, nobody holding */
    /* try (0,0)->(-1,0) */
    if (KMP_COMPARE_AND_STORE_ACQ32(head_id_p, 0, -1)) {
      KA_TRACE(1000,
               ("__kmp_test_queuing_lock: T#%d exiting: holding lock\n", gtid));
      KMP_FSYNC_ACQUIRED(lck);
      ANNOTATE_QUEUING_ACQUIRED(lck);
      return TRUE;
    }
  }

  KA_TRACE(1000,
           ("__kmp_test_queuing_lock: T#%d exiting: without lock\n", gtid));
  return FALSE;
}

static int __kmp_test_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                               kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }

  int retval = __kmp_test_queuing_lock(lck, gtid);

  if (retval) {
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}
int __kmp_release_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  kmp_info_t *this_thr;
  volatile kmp_int32 *head_id_p = &lck->lk.head_id;
  volatile kmp_int32 *tail_id_p = &lck->lk.tail_id;

  KA_TRACE(1000,
           ("__kmp_release_queuing_lock: lck:%p, T#%d entering\n", lck, gtid));
  KMP_DEBUG_ASSERT(gtid >= 0);
  this_thr = __kmp_thread_from_gtid(gtid);
  KMP_DEBUG_ASSERT(this_thr != NULL);
#ifdef DEBUG_QUEUING_LOCKS
  TRACE_LOCK(gtid + 1, "rel ent");

  if (this_thr->th.th_spin_here)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
  if (this_thr->th.th_next_waiting != 0)
    __kmp_dump_queuing_lock(this_thr, gtid, lck, *head_id_p, *tail_id_p);
#endif
  KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
  KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);

  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_QUEUING_RELEASED(lck);

  while (1) {
    kmp_int32 dequeued;
    kmp_int32 head;
    kmp_int32 tail;

    head = *head_id_p;

#ifdef DEBUG_QUEUING_LOCKS
    tail = *tail_id_p;
    TRACE_LOCK_HT(gtid + 1, "rel read: ", head, tail);
    if (head == 0)
      __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
    KMP_DEBUG_ASSERT(head !=
                     0); /* holding the lock, head must be -1 or queue head */

    if (head == -1) { /* nobody on queue */
      /* try (-1,0)->(0,0) */
      if (KMP_COMPARE_AND_STORE_REL32(head_id_p, -1, 0)) {
        KA_TRACE(
            1000,
            ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: queue empty\n",
             lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK_HT(gtid + 1, "rel exit: ", 0, 0);
#endif
        return KMP_LOCK_RELEASED;
      }
      dequeued = FALSE;
    } else {
      KMP_MB();
      tail = *tail_id_p;
      if (head == tail) { /* only one thread on the queue */
#ifdef DEBUG_QUEUING_LOCKS
        if (head <= 0)
          __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
        KMP_DEBUG_ASSERT(head > 0);

        /* try (h,h)->(-1,0) */
        dequeued = KMP_COMPARE_AND_STORE_REL64(
            RCAST(volatile kmp_int64 *, tail_id_p), KMP_PACK_64(head, head),
            KMP_PACK_64(-1, 0));
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "rel deq: (h,h)->(-1,0)");
#endif
      } else {
        volatile kmp_int32 *waiting_id_p;
        kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
        KMP_DEBUG_ASSERT(head_thr != NULL);
        waiting_id_p = &head_thr->th.th_next_waiting;

#ifdef DEBUG_QUEUING_LOCKS
        if (head <= 0 || tail <= 0)
          __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
        KMP_DEBUG_ASSERT(head > 0 && tail > 0);

        /* try (h,t)->(h',t) or (t,t); wait for the enqueuing thread to have
           published its next-waiting link */
        KMP_MB();
        *head_id_p = KMP_WAIT_YIELD((volatile kmp_uint32 *)waiting_id_p, 0,
                                    KMP_NEQ, NULL);
#ifdef DEBUG_QUEUING_LOCKS
        TRACE_LOCK(gtid + 1, "rel deq: (h,t)->(h',t)");
#endif
        dequeued = TRUE;
      }
    }

    if (dequeued) {
      kmp_info_t *head_thr = __kmp_thread_from_gtid(head - 1);
      KMP_DEBUG_ASSERT(head_thr != NULL);

#ifdef DEBUG_QUEUING_LOCKS
      if (head <= 0 || tail <= 0)
        __kmp_dump_queuing_lock(this_thr, gtid, lck, head, tail);
#endif
      KMP_DEBUG_ASSERT(head > 0 && tail > 0);

      /* The waiting thread is not released until the next statement clears its
         spin flag, which prevents a race with the acquire code. */
      head_thr->th.th_next_waiting = 0;
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK_T(gtid + 1, "rel nw=0 for t=", head);
#endif

      KMP_MB();
      /* reset spin value */
      head_thr->th.th_spin_here = FALSE;

      KA_TRACE(1000, ("__kmp_release_queuing_lock: lck:%p, T#%d exiting: after "
                      "dequeuing\n",
                      lck, gtid));
#ifdef DEBUG_QUEUING_LOCKS
      TRACE_LOCK(gtid + 1, "rel exit 2");
#endif
      return KMP_LOCK_RELEASED;
    }

#ifdef DEBUG_QUEUING_LOCKS
    TRACE_LOCK(gtid + 1, "rel retry");
#endif
  } /* while */
  KMP_ASSERT2(0, "should not get here");
  return KMP_LOCK_RELEASED;
}
static int __kmp_release_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                                  kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  lck->lk.owner_id = 0;
  return __kmp_release_queuing_lock(lck, gtid);
}

void __kmp_init_queuing_lock(kmp_queuing_lock_t *lck) {
  lck->lk.location = NULL;
  lck->lk.head_id = 0;
  lck->lk.tail_id = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0; // no thread owns the lock.
  lck->lk.depth_locked = -1; // >= 0 for nestable locks, -1 for simple locks.
  lck->lk.initialized = lck;

  KA_TRACE(1000, ("__kmp_init_queuing_lock: lock %p initialized\n", lck));
}

void __kmp_destroy_queuing_lock(kmp_queuing_lock_t *lck) {
  lck->lk.initialized = NULL;
  lck->lk.location = NULL;
  lck->lk.head_id = 0;
  lck->lk.tail_id = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
}

static void __kmp_destroy_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_queuing_lock(lck);
}
// nested queuing locks

int __kmp_acquire_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_queuing_lock_timed_template<false>(lck, gtid);
    ANNOTATE_QUEUING_ACQUIRED(lck);
    lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static int
__kmp_acquire_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_acquire_nested_queuing_lock(lck, gtid);
}

int __kmp_test_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_queuing_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_queuing_lock(lck, gtid)) {
    retval = 0;
  } else {
    retval = lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}

static int __kmp_test_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                                      kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_queuing_lock(lck, gtid);
}

int __kmp_release_nested_queuing_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (--(lck->lk.depth_locked) == 0) {
    lck->lk.owner_id = 0;
    __kmp_release_queuing_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int
__kmp_release_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck,
                                              kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_queuing_lock(lck, gtid);
}

void __kmp_init_nested_queuing_lock(kmp_queuing_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
  lck->lk.depth_locked = 0; // >= 0 for nestable locks, -1 for simple locks
}

void __kmp_destroy_nested_queuing_lock(kmp_queuing_lock_t *lck) {
  __kmp_destroy_queuing_lock(lck);
  lck->lk.depth_locked = 0;
}

static void
__kmp_destroy_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_queuing_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_queuing_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_queuing_lock(lck);
}

// access functions to fields which don't exist for all lock kinds.

static const ident_t *__kmp_get_queuing_lock_location(kmp_queuing_lock_t *lck) {
  return lck->lk.location;
}

static void __kmp_set_queuing_lock_location(kmp_queuing_lock_t *lck,
                                            const ident_t *loc) {
  lck->lk.location = loc;
}

static kmp_lock_flags_t __kmp_get_queuing_lock_flags(kmp_queuing_lock_t *lck) {
  return lck->lk.flags;
}

static void __kmp_set_queuing_lock_flags(kmp_queuing_lock_t *lck,
                                         kmp_lock_flags_t flags) {
  lck->lk.flags = flags;
}
#if KMP_USE_ADAPTIVE_LOCKS

/* RTM adaptive locks */

#if (KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300) ||                          \
    (KMP_COMPILER_MSVC && _MSC_VER >= 1700) ||                                 \
    (KMP_COMPILER_CLANG && KMP_MSVC_COMPAT)

#include <immintrin.h>
#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)

#else

// Values from the status register after a failed speculation.
#define _XBEGIN_STARTED (~0u)
#define _XABORT_EXPLICIT (1 << 0)
#define _XABORT_RETRY (1 << 1)
#define _XABORT_CONFLICT (1 << 2)
#define _XABORT_CAPACITY (1 << 3)
#define _XABORT_DEBUG (1 << 4)
#define _XABORT_NESTED (1 << 5)
#define _XABORT_CODE(x) ((unsigned char)(((x) >> 24) & 0xFF))

// Aborts for which it is worth trying again immediately.
#define SOFT_ABORT_MASK (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)

#define STRINGIZE_INTERNAL(arg) #arg
#define STRINGIZE(arg) STRINGIZE_INTERNAL(arg)

// Access to RTM instructions without compiler intrinsics.
static __inline int _xbegin() {
  int res = -1;

  __asm__ volatile("1: .byte  0xC7; .byte 0xF8;\n"
                   "   .long  1f-1b-6\n"
                   "    jmp   2f\n"
                   "1:  movl  %%eax,%0\n"
                   "2:"
                   : "+r"(res)::"memory", "%eax");
  return res;
}

static __inline void _xend() {
  __asm__ volatile(".byte 0x0f; .byte 0x01; .byte 0xd5" ::: "memory");
}

// This is a macro: the argument must be a single byte constant that the inline
// assembler can emit directly into the code.
#if KMP_OS_WINDOWS
#define _xabort(ARG) _asm _emit 0xc6 _asm _emit 0xf8 _asm _emit ARG
#else
#define _xabort(ARG)                                                           \
  __asm__ volatile(".byte 0xC6; .byte 0xF8; .byte " STRINGIZE(ARG):::"memory");
#endif

#endif // KMP_COMPILER_ICC && __INTEL_COMPILER >= 1300

// Statistics are collected for testing purposes.
#if KMP_DEBUG_ADAPTIVE_LOCKS

// Speculative lock statistics are accumulated when a lock is destroyed; locks
// that have not been destroyed yet are kept on the liveLocks list so their
// statistics can be gathered as well.
static kmp_adaptive_lock_statistics_t destroyedStats;
// To hold the list of live locks.
static kmp_adaptive_lock_info_t liveLocks;

// A lock so we can safely update the list of locks.
static kmp_bootstrap_lock_t chain_lock =
    KMP_BOOTSTRAP_LOCK_INITIALIZER(chain_lock);

// Initialize the list of stats.
void __kmp_init_speculative_stats() {
  kmp_adaptive_lock_info_t *lck = &liveLocks;

  memset(CCAST(kmp_adaptive_lock_statistics_t *, &(lck->stats)), 0,
         sizeof(lck->stats));
  lck->stats.next = lck;
  lck->stats.prev = lck;

  KMP_ASSERT(lck->stats.next->stats.prev == lck);
  KMP_ASSERT(lck->stats.prev->stats.next == lck);

  __kmp_init_bootstrap_lock(&chain_lock);
}

// Insert the lock into the circular list.
static void __kmp_remember_lock(kmp_adaptive_lock_info_t *lck) {
  __kmp_acquire_bootstrap_lock(&chain_lock);

  lck->stats.next = liveLocks.stats.next;
  lck->stats.prev = &liveLocks;

  liveLocks.stats.next = lck;
  lck->stats.next->stats.prev = lck;

  KMP_ASSERT(lck->stats.next->stats.prev == lck);
  KMP_ASSERT(lck->stats.prev->stats.next == lck);

  __kmp_release_bootstrap_lock(&chain_lock);
}

static void __kmp_forget_lock(kmp_adaptive_lock_info_t *lck) {
  KMP_ASSERT(lck->stats.next->stats.prev == lck);
  KMP_ASSERT(lck->stats.prev->stats.next == lck);

  kmp_adaptive_lock_info_t *n = lck->stats.next;
  kmp_adaptive_lock_info_t *p = lck->stats.prev;

  n->stats.prev = p;
  p->stats.next = n;
}

static void __kmp_zero_speculative_stats(kmp_adaptive_lock_info_t *lck) {
  memset(CCAST(kmp_adaptive_lock_statistics_t *, &lck->stats), 0,
         sizeof(lck->stats));
  __kmp_remember_lock(lck);
}

static void __kmp_add_stats(kmp_adaptive_lock_statistics_t *t,
                            kmp_adaptive_lock_info_t *lck) {
  kmp_adaptive_lock_statistics_t volatile *s = &lck->stats;

  t->nonSpeculativeAcquireAttempts += lck->acquire_attempts;
  t->successfulSpeculations += s->successfulSpeculations;
  t->hardFailedSpeculations += s->hardFailedSpeculations;
  t->softFailedSpeculations += s->softFailedSpeculations;
  t->nonSpeculativeAcquires += s->nonSpeculativeAcquires;
  t->lemmingYields += s->lemmingYields;
}

static void __kmp_accumulate_speculative_stats(kmp_adaptive_lock_info_t *lck) {
  __kmp_acquire_bootstrap_lock(&chain_lock);

  __kmp_add_stats(&destroyedStats, lck);
  __kmp_forget_lock(lck);

  __kmp_release_bootstrap_lock(&chain_lock);
}

static float percent(kmp_uint32 count, kmp_uint32 total) {
  return (total == 0) ? 0.0 : (100.0 * count) / total;
}

static FILE *__kmp_open_stats_file() {
  if (strcmp(__kmp_speculative_statsfile, "-") == 0)
    return stdout;

  size_t buffLen = KMP_STRLEN(__kmp_speculative_statsfile) + 20;
  char buffer[buffLen];
  KMP_SNPRINTF(&buffer[0], buffLen, __kmp_speculative_statsfile,
               (kmp_int32)getpid());
  FILE *result = fopen(&buffer[0], "w");

  // Fall back to stdout if the file could not be opened.
  return result ? result : stdout;
}
void __kmp_print_speculative_stats() {
  kmp_adaptive_lock_statistics_t total = destroyedStats;
  kmp_adaptive_lock_info_t *lck;

  for (lck = liveLocks.stats.next; lck != &liveLocks; lck = lck->stats.next) {
    __kmp_add_stats(&total, lck);
  }
  kmp_adaptive_lock_statistics_t *t = &total;
  kmp_uint32 totalSections =
      t->nonSpeculativeAcquires + t->successfulSpeculations;
  kmp_uint32 totalSpeculations = t->successfulSpeculations +
                                 t->hardFailedSpeculations +
                                 t->softFailedSpeculations;
  if (totalSections <= 0)
    return;

  FILE *statsFile = __kmp_open_stats_file();

  fprintf(statsFile, "Speculative lock statistics (all approximate!)\n");
  fprintf(statsFile, " Lock parameters: \n"
                     "   max_soft_retries               : %10d\n"
                     "   max_badness                    : %10d\n",
          __kmp_adaptive_backoff_params.max_soft_retries,
          __kmp_adaptive_backoff_params.max_badness);
  fprintf(statsFile, " Non-speculative acquire attempts : %10d\n",
          t->nonSpeculativeAcquireAttempts);
  fprintf(statsFile, " Total critical sections          : %10d\n",
          totalSections);
  fprintf(statsFile, " Successful speculations          : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSections));
  fprintf(statsFile, " Non-speculative acquires         : %10d (%5.1f%%)\n",
          t->nonSpeculativeAcquires,
          percent(t->nonSpeculativeAcquires, totalSections));
  fprintf(statsFile, " Lemming yields                   : %10d\n\n",
          t->lemmingYields);

  fprintf(statsFile, " Speculative acquire attempts     : %10d\n",
          totalSpeculations);
  fprintf(statsFile, "   Successes                      : %10d (%5.1f%%)\n",
          t->successfulSpeculations,
          percent(t->successfulSpeculations, totalSpeculations));
  fprintf(statsFile, "   Soft failures                  : %10d (%5.1f%%)\n",
          t->softFailedSpeculations,
          percent(t->softFailedSpeculations, totalSpeculations));
  fprintf(statsFile, "   Hard failures                  : %10d (%5.1f%%)\n",
          t->hardFailedSpeculations,
          percent(t->hardFailedSpeculations, totalSpeculations));

  if (statsFile != stdout)
    fclose(statsFile);
}
#define KMP_INC_STAT(lck, stat) (lck->lk.adaptive.stats.stat++)

#else
#define KMP_INC_STAT(lck, stat)

#endif // KMP_DEBUG_ADAPTIVE_LOCKS

#define GET_QLK_PTR(l) ((kmp_queuing_lock_t *)&(l)->lk.qlk)

static inline bool __kmp_is_unlocked_queuing_lock(kmp_queuing_lock_t *lck) {
  // It is enough to check that the head_id is zero; there is no need to also
  // check the tail.
  bool res = lck->lk.head_id == 0;

// We need a fence here, since we must ensure that no memory operations
// from later in this thread float above that read.
#if KMP_COMPILER_ICC
  _mm_mfence();
#else
  __sync_synchronize();
#endif

  return res;
}

// Functions for manipulating the badness
static __inline void
__kmp_update_badness_after_success(kmp_adaptive_lock_t *lck) {
  // Reset the badness to zero so we eagerly try to speculate again.
  lck->lk.adaptive.badness = 0;
  KMP_INC_STAT(lck, successfulSpeculations);
}

// Create a bit mask with one more set bit.
static __inline void __kmp_step_badness(kmp_adaptive_lock_t *lck) {
  kmp_uint32 newBadness = (lck->lk.adaptive.badness << 1) | 1;
  if (newBadness > lck->lk.adaptive.max_badness) {
    return;
  } else {
    lck->lk.adaptive.badness = newBadness;
  }
}

// Check whether speculation should be attempted.
static __inline int __kmp_should_speculate(kmp_adaptive_lock_t *lck,
                                           kmp_int32 gtid) {
  kmp_uint32 badness = lck->lk.adaptive.badness;
  kmp_uint32 attempts = lck->lk.adaptive.acquire_attempts;
  int res = (attempts & badness) == 0;
  return res;
}

// Attempt to acquire only the speculative lock.
// Does not back off to the non-speculative lock.
static int __kmp_test_adaptive_lock_only(kmp_adaptive_lock_t *lck,
                                         kmp_int32 gtid) {
  int retries = lck->lk.adaptive.max_soft_retries;

  // We count only the results of speculation (success, hard fail, soft fail);
  // their sum is the number of speculation attempts.
  do {
    kmp_uint32 status = _xbegin();

    if (status == _XBEGIN_STARTED) {
      /* Speculation has started; check that no one acquired the lock while we
         were starting. */
      if (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
        // Lock is now visibly acquired, so someone beat us to it. Abort the
        // transaction so we restart at _xbegin with the failure status.
        _xabort(0x01);
        KMP_ASSERT2(0, "should not get here");
      }
      return 1; // Lock has been acquired (speculatively).
    } else {
      // We have aborted, update the statistics.
      if (status & SOFT_ABORT_MASK) {
        KMP_INC_STAT(lck, softFailedSpeculations);
        // and loop round to retry.
      } else {
        KMP_INC_STAT(lck, hardFailedSpeculations);
        // Give up if we had a hard failure.
        break;
      }
    }
  } while (retries--); // Loop while we have retries and didn't fail hard.

  // Either a hard failure or no soft success after all attempts: back off
  // future speculation on this lock.
  __kmp_step_badness(lck);
  return 0;
}
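
// The function above follows the standard RTM lock-elision pattern: _xbegin()
// starts a transaction, the code re-checks inside the transaction that the
// underlying lock is still free, and _xend()/_xabort() later commit or roll
// back. A hedged sketch of the same pattern using the <immintrin.h> intrinsics
// (compile with -mrtm); 'fallback_lock' and 'try_speculative' are illustrative
// stand-ins, not the runtime's names:
//
//   #include <immintrin.h>
//   #include <atomic>
//
//   static bool try_speculative(std::atomic<int> &fallback_lock, int retries) {
//     do {
//       unsigned status = _xbegin();
//       if (status == _XBEGIN_STARTED) {
//         if (fallback_lock.load(std::memory_order_relaxed) != 0)
//           _xabort(0x01); // the real lock is held: roll back and retry
//         return true;     // now running transactionally "inside" the lock;
//                          // the caller executes _xend() after its critical
//                          // section to commit
//       }
//       if (!(status & (_XABORT_RETRY | _XABORT_CONFLICT | _XABORT_EXPLICIT)))
//         break; // hard failure (e.g. capacity): retrying will not help
//     } while (retries-- > 0);
//     return false; // the caller falls back to the real lock
//   }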
// Attempt to acquire the speculative lock, or back off to the non-speculative
// one if the speculative lock cannot be acquired.
static int __kmp_test_adaptive_lock(kmp_adaptive_lock_t *lck, kmp_int32 gtid) {
  // First try to acquire the lock speculatively.
  if (__kmp_should_speculate(lck, gtid) &&
      __kmp_test_adaptive_lock_only(lck, gtid))
    return 1;

  // Speculative acquisition failed, so try to acquire it non-speculatively.
  // Count the non-speculative acquire attempt.
  lck->lk.adaptive.acquire_attempts++;

  // Use the base, non-speculative lock.
  if (__kmp_test_queuing_lock(GET_QLK_PTR(lck), gtid)) {
    KMP_INC_STAT(lck, nonSpeculativeAcquires);
    return 1; // Lock is acquired (non-speculatively).
  } else {
    return 0; // Failed to acquire the lock; it is already visibly locked.
  }
}

static int __kmp_test_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }

  int retval = __kmp_test_adaptive_lock(lck, gtid);

  if (retval) {
    lck->lk.qlk.owner_id = gtid + 1;
  }
  return retval;
}

// Block until we can acquire a speculative, adaptive lock. If speculation is
// currently disallowed we fall through to the real lock; if the real lock is
// visibly held we pause (without joining the queue) so the queue can drain and
// speculation becomes possible again. This avoids the "lemming" effect of
// every thread piling onto the queue, at the cost of strict fairness, which
// OpenMP does not guarantee anyway.
static void __kmp_acquire_adaptive_lock(kmp_adaptive_lock_t *lck,
                                        kmp_int32 gtid) {
  if (__kmp_should_speculate(lck, gtid)) {
    if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
      if (__kmp_test_adaptive_lock_only(lck, gtid))
        return;
      // We tried speculation and failed, so give up.
    } else {
      // We can't try speculation until the lock is free, so pause here
      // (without suspending on the queuing lock) to let the queue drain,
      // then try again.
      while (!__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(lck))) {
        KMP_INC_STAT(lck, lemmingYields);
        __kmp_yield(TRUE);
      }

      if (__kmp_test_adaptive_lock_only(lck, gtid))
        return;
    }
  }

  // Speculative acquisition failed, so acquire it non-speculatively.
  // Count the non-speculative acquire attempt.
  lck->lk.adaptive.acquire_attempts++;

  __kmp_acquire_queuing_lock_timed_template<FALSE>(GET_QLK_PTR(lck), gtid);
  // We have acquired the base lock, so count that.
  KMP_INC_STAT(lck, nonSpeculativeAcquires);
  ANNOTATE_QUEUING_ACQUIRED(lck);
}

static void __kmp_acquire_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == gtid) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_adaptive_lock(lck, gtid);

  lck->lk.qlk.owner_id = gtid + 1;
}

static int __kmp_release_adaptive_lock(kmp_adaptive_lock_t *lck,
                                       kmp_int32 gtid) {
  if (__kmp_is_unlocked_queuing_lock(GET_QLK_PTR(
          lck))) { // If the lock doesn't look claimed we must be speculating.
    _xend(); // Exit speculation.
    __kmp_update_badness_after_success(lck);
  } else { // The lock is visibly locked, so we are not speculating and must
    // use the underlying lock's release scheme.
    __kmp_release_queuing_lock(GET_QLK_PTR(lck), gtid);
  }
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck,
                                                   kmp_int32 gtid) {
  char const *const func = "omp_unset_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  lck->lk.qlk.owner_id = 0;
  __kmp_release_adaptive_lock(lck, gtid);
  return KMP_LOCK_RELEASED;
}

static void __kmp_init_adaptive_lock(kmp_adaptive_lock_t *lck) {
  __kmp_init_queuing_lock(GET_QLK_PTR(lck));
  lck->lk.adaptive.badness = 0;
  lck->lk.adaptive.acquire_attempts = 0;
  lck->lk.adaptive.max_soft_retries =
      __kmp_adaptive_backoff_params.max_soft_retries;
  lck->lk.adaptive.max_badness = __kmp_adaptive_backoff_params.max_badness;
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_zero_speculative_stats(&lck->lk.adaptive);
#endif
  KA_TRACE(1000, ("__kmp_init_adaptive_lock: lock %p initialized\n", lck));
}

static void __kmp_destroy_adaptive_lock(kmp_adaptive_lock_t *lck) {
#if KMP_DEBUG_ADAPTIVE_LOCKS
  __kmp_accumulate_speculative_stats(&lck->lk.adaptive);
#endif
  __kmp_destroy_queuing_lock(GET_QLK_PTR(lck));
  // Nothing needed for the speculative part.
}

static void __kmp_destroy_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.qlk.initialized != GET_QLK_PTR(lck)) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_get_queuing_lock_owner(GET_QLK_PTR(lck)) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_adaptive_lock(lck);
}
#endif // KMP_USE_ADAPTIVE_LOCKS

/* ------------------------------------------------------------------------ */
/* DRDPA ticket locks: "DRDPA" means Dynamically Reconfigurable Distributed
   Polling Area */

static kmp_int32 __kmp_get_drdpa_lock_owner(kmp_drdpa_lock_t *lck) {
  return lck->lk.owner_id - 1;
}

static inline bool __kmp_is_drdpa_lock_nestable(kmp_drdpa_lock_t *lck) {
  return lck->lk.depth_locked != -1;
}

__forceinline static int
__kmp_acquire_drdpa_lock_timed_template(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  kmp_uint64 ticket = KMP_ATOMIC_INC(&lck->lk.next_ticket);
  kmp_uint64 mask = lck->lk.mask; // atomic load
  std::atomic<kmp_uint64> *polls = lck->lk.polls;

#ifdef USE_LOCK_PROFILE
  if (polls[ticket & mask] != ticket)
    __kmp_printf("LOCK CONTENTION: %p\n", lck);
/* else __kmp_printf( "." );*/
#endif /* USE_LOCK_PROFILE */

  // Spin-wait on our own slot, but re-read the polls pointer and mask each
  // iteration in case the polling area has been reconfigured. Unless it is
  // reconfigured, the reloads stay in L1 cache and are cheap.
  kmp_uint32 spins;

  KMP_FSYNC_PREPARE(lck);
  KMP_INIT_YIELD(spins);
  while (polls[ticket & mask] < ticket) { // atomic load
    // If we are oversubscribed, or have waited a bit (and KMP_LIBRARY ==
    // turnaround), then yield.
    KMP_YIELD(TCR_4(__kmp_nth) >
              (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc));
    KMP_YIELD_SPIN(spins);

    // Re-read the mask and the poll pointer from the lock structure.
    // "mask" must be read before "polls": if another thread reconfigures the
    // polling area and we pair the new mask with the old polls pointer, we
    // could index beyond the end of the old polling area.
    mask = lck->lk.mask; // atomic load
    polls = lck->lk.polls; // atomic load
  }

  // Critical section starts here.
  KMP_FSYNC_ACQUIRED(lck);
  KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld acquired lock %p\n",
                  ticket, lck));
  lck->lk.now_serving = ticket; // non-volatile store

  // Deallocate a garbage polling area if we know that we are the last
  // thread that could possibly access it.
  if ((lck->lk.old_polls != NULL) && (ticket >= lck->lk.cleanup_ticket)) {
    __kmp_free(lck->lk.old_polls);
    lck->lk.old_polls = NULL;
    lck->lk.cleanup_ticket = 0;
  }

  // Check whether we should reconfigure the polling area. If there is still a
  // garbage polling area to be deallocated from a previous reconfiguration,
  // let a later thread reconfigure it.
  if (lck->lk.old_polls == NULL) {
    bool reconfigure = false;
    std::atomic<kmp_uint64> *old_polls = polls;
    kmp_uint32 num_polls = TCR_4(lck->lk.num_polls);

    if (TCR_4(__kmp_nth) >
        (__kmp_avail_proc ? __kmp_avail_proc : __kmp_xproc)) {
      // We are oversubscribed: contract the polling area down to a single
      // location, if that hasn't been done already.
      if (num_polls > 1) {
        reconfigure = true;
        num_polls = TCR_4(lck->lk.num_polls);
        mask = 0;
        num_polls = 1;
        polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
                                                          sizeof(*polls));
        polls[0] = ticket;
      }
    } else {
      // We are under/fully subscribed: the polling area should be at least as
      // large as the number of threads waiting on the lock.
      kmp_uint64 num_waiting = TCR_8(lck->lk.next_ticket) - ticket - 1;
      if (num_waiting > num_polls) {
        kmp_uint32 old_num_polls = num_polls;
        reconfigure = true;
        do {
          mask = (mask << 1) | 1;
          num_polls *= 2;
        } while (num_polls <= num_waiting);

        // Allocate the new polling area and copy the relevant portion of the
        // old one; __kmp_allocate() zeroes the memory it returns.
        polls = (std::atomic<kmp_uint64> *)__kmp_allocate(num_polls *
                                                          sizeof(*polls));
        kmp_uint32 i;
        for (i = 0; i < old_num_polls; i++) {
          polls[i].store(old_polls[i]);
        }
      }
    }

    if (reconfigure) {
      // Write the updated fields back to the lock structure. "polls" must be
      // written before "mask", for the same reason the reads above are
      // ordered.
      KA_TRACE(1000, ("__kmp_acquire_drdpa_lock: ticket #%lld reconfiguring "
                      "lock %p to %d polls\n",
                      ticket, lck, num_polls));

      lck->lk.old_polls = old_polls;
      lck->lk.polls = polls; // atomic store

      KMP_MB();

      lck->lk.num_polls = num_polls;
      lck->lk.mask = mask; // atomic store

      KMP_MB();

      // Only after the new polling area and mask are visible can we update
      // the cleanup ticket.
      lck->lk.cleanup_ticket = lck->lk.next_ticket;
    }
  }
  return KMP_LOCK_ACQUIRED_FIRST;
}
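
// The DRDPA lock is a ticket lock in which each waiter spins on its own slot
// of a (dynamically resizable) polling array, so a release touches a single
// cache line instead of invalidating every spinner. The fixed-size form of
// that idea is the classic array (Anderson) queue lock; a hedged, stand-alone
// sketch, where NSLOTS and the type names are assumptions for illustration and
// not the runtime's API:
//
//   #include <atomic>
//   #include <cstdint>
//   template <unsigned NSLOTS> // power of two; one slot per concurrent waiter
//   struct ArrayQueueLock {
//     std::atomic<uint64_t> polls[NSLOTS]; // polls[i] holds the last ticket
//                                          // released into slot i
//     std::atomic<uint64_t> next_ticket{0};
//     uint64_t now_serving = 0; // written only by the lock holder
//     ArrayQueueLock() {
//       for (auto &p : polls)
//         p.store(0, std::memory_order_relaxed); // ticket 0 may proceed
//     }
//     void lock() {
//       uint64_t t = next_ticket.fetch_add(1, std::memory_order_relaxed);
//       while (polls[t & (NSLOTS - 1)].load(std::memory_order_acquire) < t) {
//       } // each waiter spins only on its own slot
//       now_serving = t;
//     }
//     void unlock() {
//       uint64_t next = now_serving + 1;
//       polls[next & (NSLOTS - 1)].store(next, std::memory_order_release);
//     }
//   };
//
// The runtime's version additionally grows or shrinks the array at acquire
// time and frees the old array once every ticket that could reference it has
// passed (the cleanup_ticket logic above).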
int __kmp_acquire_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  int retval = __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
  ANNOTATE_DRDPA_ACQUIRED(lck);
  return retval;
}

static int __kmp_acquire_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                kmp_int32 gtid) {
  char const *const func = "omp_set_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) == gtid)) {
    KMP_FATAL(LockIsAlreadyOwned, func);
  }

  __kmp_acquire_drdpa_lock(lck, gtid);

  lck->lk.owner_id = gtid + 1;
  return KMP_LOCK_ACQUIRED_FIRST;
}

int __kmp_test_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  // First get a ticket, then read the polls pointer and the mask.
  // The polls pointer must be read before the mask (see above).
  kmp_uint64 ticket = lck->lk.next_ticket; // atomic load
  std::atomic<kmp_uint64> *polls = lck->lk.polls;
  kmp_uint64 mask = lck->lk.mask; // atomic load
  if (polls[ticket & mask] == ticket) {
    kmp_uint64 next_ticket = ticket + 1;
    if (__kmp_atomic_compare_store_acq(&lck->lk.next_ticket, ticket,
                                       next_ticket)) {
      KMP_FSYNC_ACQUIRED(lck);
      KA_TRACE(1000, ("__kmp_test_drdpa_lock: ticket #%lld acquired lock %p\n",
                      ticket, lck));
      lck->lk.now_serving = ticket; // non-volatile store

      // Since no threads are waiting, there is no need to reconfigure the
      // polling area here; leave any cleanup to a later acquire, which is
      // allowed to block.
      return TRUE;
    }
  }
  return FALSE;
}

static int __kmp_test_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                             kmp_int32 gtid) {
  char const *const func = "omp_test_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }

  int retval = __kmp_test_drdpa_lock(lck, gtid);

  if (retval) {
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}

int __kmp_release_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  // Read the ticket value from the lock data struct, then the polls pointer
  // and the mask.  The polls pointer must be read before the mask (see above).
  kmp_uint64 ticket = lck->lk.now_serving + 1; // non-atomic load
  std::atomic<kmp_uint64> *polls = lck->lk.polls; // atomic load
  kmp_uint64 mask = lck->lk.mask; // atomic load
  KA_TRACE(1000, ("__kmp_release_drdpa_lock: ticket #%lld released lock %p\n",
                  ticket - 1, lck));
  KMP_FSYNC_RELEASING(lck);
  ANNOTATE_DRDPA_RELEASED(lck);
  polls[ticket & mask] = ticket; // atomic store: wake the next waiter's slot
  return KMP_LOCK_RELEASED;
}
2473 static int __kmp_release_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
2475 char const *
const func =
"omp_unset_lock";
2477 if (lck->lk.initialized != lck) {
2478 KMP_FATAL(LockIsUninitialized, func);
2480 if (__kmp_is_drdpa_lock_nestable(lck)) {
2481 KMP_FATAL(LockNestableUsedAsSimple, func);
2483 if (__kmp_get_drdpa_lock_owner(lck) == -1) {
2484 KMP_FATAL(LockUnsettingFree, func);
2486 if ((gtid >= 0) && (__kmp_get_drdpa_lock_owner(lck) >= 0) &&
2487 (__kmp_get_drdpa_lock_owner(lck) != gtid)) {
2488 KMP_FATAL(LockUnsettingSetByAnother, func);
2490 lck->lk.owner_id = 0;
2491 return __kmp_release_drdpa_lock(lck, gtid);
void __kmp_init_drdpa_lock(kmp_drdpa_lock_t *lck) {
  lck->lk.location = NULL;
  lck->lk.num_polls = 1;
  lck->lk.polls = (std::atomic<kmp_uint64> *)__kmp_allocate(
      lck->lk.num_polls * sizeof(*(lck->lk.polls)));
  lck->lk.cleanup_ticket = 0;
  lck->lk.old_polls = NULL;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0; // no thread owns the lock
  lck->lk.depth_locked = -1; // -1 marks a simple (non-nestable) lock
  lck->lk.initialized = lck;

  KA_TRACE(1000, ("__kmp_init_drdpa_lock: lock %p initialized\n", lck));
}
void __kmp_destroy_drdpa_lock(kmp_drdpa_lock_t *lck) {
  lck->lk.initialized = NULL;
  lck->lk.location = NULL;
  if (lck->lk.polls.load() != NULL) {
    __kmp_free(lck->lk.polls.load());
    lck->lk.polls = NULL;
  }
  if (lck->lk.old_polls != NULL) {
    __kmp_free(lck->lk.old_polls);
    lck->lk.old_polls = NULL;
  }
  lck->lk.num_polls = 0;
  lck->lk.cleanup_ticket = 0;
  lck->lk.next_ticket = 0;
  lck->lk.now_serving = 0;
  lck->lk.owner_id = 0;
  lck->lk.depth_locked = -1;
}

static void __kmp_destroy_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  char const *const func = "omp_destroy_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockNestableUsedAsSimple, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_drdpa_lock(lck);
}
// nested drdpa ticket locks

int __kmp_acquire_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
    lck->lk.depth_locked += 1;
    return KMP_LOCK_ACQUIRED_NEXT;
  } else {
    __kmp_acquire_drdpa_lock_timed_template(lck, gtid);
    ANNOTATE_DRDPA_ACQUIRED(lck);
    lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
    return KMP_LOCK_ACQUIRED_FIRST;
  }
}

static void __kmp_acquire_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                        kmp_int32 gtid) {
  char const *const func = "omp_set_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  __kmp_acquire_nested_drdpa_lock(lck, gtid);
}
int __kmp_test_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  int retval;

  KMP_DEBUG_ASSERT(gtid >= 0);

  if (__kmp_get_drdpa_lock_owner(lck) == gtid) {
    retval = ++lck->lk.depth_locked;
  } else if (!__kmp_test_drdpa_lock(lck, gtid)) {
    retval = 0;
  } else {
    retval = lck->lk.depth_locked = 1;
    lck->lk.owner_id = gtid + 1;
  }
  return retval;
}

static int __kmp_test_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                    kmp_int32 gtid) {
  char const *const func = "omp_test_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  return __kmp_test_nested_drdpa_lock(lck, gtid);
}
int __kmp_release_nested_drdpa_lock(kmp_drdpa_lock_t *lck, kmp_int32 gtid) {
  KMP_DEBUG_ASSERT(gtid >= 0);

  if (--(lck->lk.depth_locked) == 0) {
    lck->lk.owner_id = 0;
    __kmp_release_drdpa_lock(lck, gtid);
    return KMP_LOCK_RELEASED;
  }
  return KMP_LOCK_STILL_HELD;
}

static int __kmp_release_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck,
                                                       kmp_int32 gtid) {
  char const *const func = "omp_unset_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) == -1) {
    KMP_FATAL(LockUnsettingFree, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) != gtid) {
    KMP_FATAL(LockUnsettingSetByAnother, func);
  }
  return __kmp_release_nested_drdpa_lock(lck, gtid);
}

void __kmp_init_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
  __kmp_init_drdpa_lock(lck);
  lck->lk.depth_locked = 0; // 0 means the nestable lock is currently unowned
}

void __kmp_destroy_nested_drdpa_lock(kmp_drdpa_lock_t *lck) {
  __kmp_destroy_drdpa_lock(lck);
  lck->lk.depth_locked = 0;
}

static void __kmp_destroy_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  char const *const func = "omp_destroy_nest_lock";
  if (lck->lk.initialized != lck) {
    KMP_FATAL(LockIsUninitialized, func);
  }
  if (!__kmp_is_drdpa_lock_nestable(lck)) {
    KMP_FATAL(LockSimpleUsedAsNestable, func);
  }
  if (__kmp_get_drdpa_lock_owner(lck) != -1) {
    KMP_FATAL(LockStillOwned, func);
  }
  __kmp_destroy_nested_drdpa_lock(lck);
}
// Accessors for the location/flags fields of the drdpa lock.

static const ident_t *__kmp_get_drdpa_lock_location(kmp_drdpa_lock_t *lck) {
  return lck->lk.location;
}

static void __kmp_set_drdpa_lock_location(kmp_drdpa_lock_t *lck,
                                          const ident_t *loc) {
  lck->lk.location = loc;
}

static kmp_lock_flags_t __kmp_get_drdpa_lock_flags(kmp_drdpa_lock_t *lck) {
  return lck->lk.flags;
}

static void __kmp_set_drdpa_lock_flags(kmp_drdpa_lock_t *lck,
                                       kmp_lock_flags_t flags) {
  lck->lk.flags = flags;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
#define __kmp_tsc() __kmp_hardware_timestamp()
kmp_backoff_t __kmp_spin_backoff_params = {1, 4096, 100};
#else
// Fall back to the nanosecond timer where the hardware timestamp counter is
// not available.
extern kmp_uint64 __kmp_now_nsec();
kmp_backoff_t __kmp_spin_backoff_params = {1, 256, 100};
#define __kmp_tsc() __kmp_now_nsec()
#endif

// A useful predicate for dealing with timestamps that may wrap:
// before(a, b) is true iff timestamp a precedes timestamp b.
static inline bool before(kmp_uint64 a, kmp_uint64 b) {
  return ((kmp_int64)b - (kmp_int64)a) > 0;
}

// Truncated binary exponential backoff function.
void __kmp_spin_backoff(kmp_backoff_t *boff) {
  kmp_uint32 i;
  // Spin for boff->step short intervals, pausing until each interval's goal
  // timestamp has passed.
  for (i = boff->step; i > 0; i--) {
    kmp_uint64 goal = __kmp_tsc() + boff->min_tick;
    do {
      KMP_CPU_PAUSE();
    } while (before(__kmp_tsc(), goal));
  }
  boff->step = (boff->step << 1 | 1) & (boff->max_backoff - 1);
}
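
// Worked example (assuming the aggregate initializers above are in the order
// {step, max_backoff, min_tick}): starting from step = 1, successive calls
// update step as (step << 1 | 1) & (max_backoff - 1), i.e. 1, 3, 7, 15, ...
// until it saturates at max_backoff - 1. The mask only behaves as a cap when
// max_backoff is a power of two, which both parameter sets above satisfy.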
#if KMP_USE_DYNAMIC_LOCK

// Direct lock initializer: writes the lock kind's tag into the low bits of the
// lock word.
static void __kmp_init_direct_lock(kmp_dyna_lock_t *lck,
                                   kmp_dyna_lockseq_t seq) {
  TCW_4(*lck, KMP_GET_D_TAG(seq));
  KA_TRACE(
      20,
      ("__kmp_init_direct_lock: initialized direct lock with type#%d\n", seq));
}

#if KMP_USE_TSX

// HLE lock functions.
#define HLE_ACQUIRE ".byte 0xf2;"
#define HLE_RELEASE ".byte 0xf3;"

static inline kmp_uint32 swap4(kmp_uint32 volatile *p, kmp_uint32 v) {
  __asm__ volatile(HLE_ACQUIRE "xchg %1,%0" : "+r"(v), "+m"(*p) : : "memory");
  return v;
}

static void __kmp_destroy_hle_lock(kmp_dyna_lock_t *lck) { TCW_4(*lck, 0); }
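
// Note on the prefixes: 0xf2 and 0xf3 are the XACQUIRE/XRELEASE hints of Intel
// Hardware Lock Elision. Prepended to the xchg in swap4() (and to the
// releasing mov below), they let the processor elide the lock and run the
// critical section transactionally; on hardware without TSX the prefixes are
// simply ignored and the plain atomic instruction executes.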
static void __kmp_destroy_hle_lock_with_checks(kmp_dyna_lock_t *lck) {
  TCW_4(*lck, 0);
}

static void __kmp_acquire_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  // Use gtid for KMP_LOCK_BUSY if necessary
  if (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle)) {
    int delay = 1;
    do {
      // Spin-read until the lock looks free, with a small bounded exponential
      // pause (delay is capped by the & 7 mask), then retry the swap.
      while (*(kmp_uint32 volatile *)lck != KMP_LOCK_FREE(hle)) {
        for (int i = delay; i != 0; --i)
          KMP_CPU_PAUSE();
        delay = ((delay << 1) | 1) & 7;
      }
    } while (swap4(lck, KMP_LOCK_BUSY(1, hle)) != KMP_LOCK_FREE(hle));
  }
}

static void __kmp_acquire_hle_lock_with_checks(kmp_dyna_lock_t *lck,
                                               kmp_int32 gtid) {
  __kmp_acquire_hle_lock(lck, gtid);
}

static int __kmp_release_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  __asm__ volatile(HLE_RELEASE "movl %1,%0"
                   : "=m"(*lck)
                   : "r"(KMP_LOCK_FREE(hle))
                   : "memory");
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_hle_lock_with_checks(kmp_dyna_lock_t *lck,
                                              kmp_int32 gtid) {
  return __kmp_release_hle_lock(lck, gtid);
}

static int __kmp_test_hle_lock(kmp_dyna_lock_t *lck, kmp_int32 gtid) {
  return swap4(lck, KMP_LOCK_BUSY(1, hle)) == KMP_LOCK_FREE(hle);
}

static int __kmp_test_hle_lock_with_checks(kmp_dyna_lock_t *lck,
                                           kmp_int32 gtid) {
  return __kmp_test_hle_lock(lck, gtid);
}

static void __kmp_init_rtm_lock(kmp_queuing_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
}

static void __kmp_destroy_rtm_lock(kmp_queuing_lock_t *lck) {
  __kmp_destroy_queuing_lock(lck);
}

static void __kmp_destroy_rtm_lock_with_checks(kmp_queuing_lock_t *lck) {
  __kmp_destroy_queuing_lock_with_checks(lck);
}
static void __kmp_acquire_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  unsigned retries = 3, status;
  do {
    status = _xbegin();
    if (status == _XBEGIN_STARTED) {
      if (__kmp_is_unlocked_queuing_lock(lck))
        return;
      _xabort(0xff);
    }
    if ((status & _XABORT_EXPLICIT) && _XABORT_CODE(status) == 0xff) {
      // Wait until the lock becomes free before retrying the transaction.
      while (!__kmp_is_unlocked_queuing_lock(lck))
        KMP_YIELD(TRUE);
    } else if (!(status & _XABORT_RETRY))
      break;
  } while (retries--);

  // Fall back to the real queuing lock once speculation has failed repeatedly.
  __kmp_acquire_queuing_lock(lck, gtid);
}
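
// How the speculative path works: _xbegin() starts a hardware transaction. If
// the underlying queuing lock is observed free, the critical section runs
// transactionally and is committed by the _xend() in the release path below;
// the lock word itself is never written. If the lock is busy, the transaction
// aborts with code 0xff, the thread waits for the lock to look free, and after
// a few retries it gives up and takes the real queuing lock.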
static void __kmp_acquire_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
                                               kmp_int32 gtid) {
  __kmp_acquire_rtm_lock(lck, gtid);
}

static int __kmp_release_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  if (__kmp_is_unlocked_queuing_lock(lck)) {
    // Releasing from speculation: commit the transaction.
    _xend();
  } else {
    // Releasing from a real (non-speculative) lock acquisition.
    __kmp_release_queuing_lock(lck, gtid);
  }
  return KMP_LOCK_RELEASED;
}

static int __kmp_release_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
                                              kmp_int32 gtid) {
  return __kmp_release_rtm_lock(lck, gtid);
}

static int __kmp_test_rtm_lock(kmp_queuing_lock_t *lck, kmp_int32 gtid) {
  unsigned retries = 3, status;
  do {
    status = _xbegin();
    if (status == _XBEGIN_STARTED && __kmp_is_unlocked_queuing_lock(lck)) {
      return 1;
    }
    if (!(status & _XABORT_RETRY))
      break;
  } while (retries--);

  return (__kmp_is_unlocked_queuing_lock(lck)) ? 1 : 0;
}

static int __kmp_test_rtm_lock_with_checks(kmp_queuing_lock_t *lck,
                                           kmp_int32 gtid) {
  return __kmp_test_rtm_lock(lck, gtid);
}
#endif // KMP_USE_TSX

// Entry functions for indirect locks (first element of direct lock jump tables).
static void __kmp_init_indirect_lock(kmp_dyna_lock_t *l,
                                     kmp_dyna_lockseq_t tag);
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock);
static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32);
static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                               kmp_int32);
static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                 kmp_int32);
static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                kmp_int32);
// Wrappers that adapt the typed lock functions to the union lock type
// (kmp_user_lock_p) used by the jump tables below.
#define KMP_FOREACH_LOCK_KIND(m, a) m(ticket, a) m(queuing, a) m(drdpa, a)

#define expand1(lk, op)                                                        \
  static void __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock) {              \
    __kmp_##op##_##lk##_##lock(&lock->lk);                                     \
  }
#define expand2(lk, op)                                                        \
  static int __kmp_##op##_##lk##_##lock(kmp_user_lock_p lock,                 \
                                        kmp_int32 gtid) {                     \
    return __kmp_##op##_##lk##_##lock(&lock->lk, gtid);                       \
  }
#define expand3(lk, op)                                                        \
  static void __kmp_set_##lk##_##lock_flags(kmp_user_lock_p lock,             \
                                            kmp_lock_flags_t flags) {         \
    __kmp_set_##lk##_lock_flags(&lock->lk, flags);                            \
  }
#define expand4(lk, op)                                                        \
  static void __kmp_set_##lk##_##lock_location(kmp_user_lock_p lock,          \
                                               const ident_t *loc) {          \
    __kmp_set_##lk##_lock_location(&lock->lk, loc);                           \
  }

KMP_FOREACH_LOCK_KIND(expand1, init)
KMP_FOREACH_LOCK_KIND(expand1, init_nested)
KMP_FOREACH_LOCK_KIND(expand1, destroy)
KMP_FOREACH_LOCK_KIND(expand1, destroy_nested)
KMP_FOREACH_LOCK_KIND(expand2, acquire)
KMP_FOREACH_LOCK_KIND(expand2, acquire_nested)
KMP_FOREACH_LOCK_KIND(expand2, release)
KMP_FOREACH_LOCK_KIND(expand2, release_nested)
KMP_FOREACH_LOCK_KIND(expand2, test)
KMP_FOREACH_LOCK_KIND(expand2, test_nested)
KMP_FOREACH_LOCK_KIND(expand3, )
KMP_FOREACH_LOCK_KIND(expand4, )
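
// For illustration: KMP_FOREACH_LOCK_KIND(expand2, acquire) instantiates one
// forwarding wrapper per lock kind; for the ticket lock it produces (roughly)
//   static int __kmp_acquire_ticket_lock(kmp_user_lock_p lock, kmp_int32 gtid) {
//     return __kmp_acquire_ticket_lock(&lock->lk, gtid);
//   }
// where the inner call resolves to the typed overload taking a
// kmp_ticket_lock_t *, so the wrapper only adapts the parameter type.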
// Jump tables for the direct lock functions. Only the odd entries are filled,
// which avoids shifting out the low tag bit of the lock word; index 0 holds
// the entry function for indirect locks.

// init functions
#define expand(l, op) 0, __kmp_init_direct_lock,
void (*__kmp_direct_init[])(kmp_dyna_lock_t *, kmp_dyna_lockseq_t) = {
    __kmp_init_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, init)};
#undef expand

// destroy functions
#define expand(l, op) 0, (void (*)(kmp_dyna_lock_t *))__kmp_##op##_##l##_lock,
static void (*direct_destroy[])(kmp_dyna_lock_t *) = {
    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#undef expand
#define expand(l, op)                                                          \
  0, (void (*)(kmp_dyna_lock_t *))__kmp_destroy_##l##_lock_with_checks,
static void (*direct_destroy_check[])(kmp_dyna_lock_t *) = {
    __kmp_destroy_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, destroy)};
#undef expand

// set/acquire functions
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
static int (*direct_set[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_set_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
static int (*direct_set_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_set_indirect_lock_with_checks, 0,
    KMP_FOREACH_D_LOCK(expand, acquire)};
#undef expand

// unset/release and test functions
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock,
static int (*direct_unset[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_unset_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, release)};
static int (*direct_test[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_test_indirect_lock, 0, KMP_FOREACH_D_LOCK(expand, test)};
#undef expand
#define expand(l, op)                                                          \
  0, (int (*)(kmp_dyna_lock_t *, kmp_int32))__kmp_##op##_##l##_lock_with_checks,
static int (*direct_unset_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_unset_indirect_lock_with_checks, 0,
    KMP_FOREACH_D_LOCK(expand, release)};
static int (*direct_test_check[])(kmp_dyna_lock_t *, kmp_int32) = {
    __kmp_test_indirect_lock_with_checks, 0, KMP_FOREACH_D_LOCK(expand, test)};
#undef expand
// Exposes only one set of jump tables (*lock or *lock_with_checks).
void (*(*__kmp_direct_destroy))(kmp_dyna_lock_t *) = 0;
int (*(*__kmp_direct_set))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_unset))(kmp_dyna_lock_t *, kmp_int32) = 0;
int (*(*__kmp_direct_test))(kmp_dyna_lock_t *, kmp_int32) = 0;

// Jump tables for the indirect lock functions.
#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
void (*__kmp_indirect_init[])(kmp_user_lock_p) = {
    KMP_FOREACH_I_LOCK(expand, init)};
#undef expand

#define expand(l, op) (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock,
static void (*indirect_destroy[])(kmp_user_lock_p) = {
    KMP_FOREACH_I_LOCK(expand, destroy)};
#undef expand
#define expand(l, op)                                                          \
  (void (*)(kmp_user_lock_p)) __kmp_##op##_##l##_##lock_with_checks,
static void (*indirect_destroy_check[])(kmp_user_lock_p) = {
    KMP_FOREACH_I_LOCK(expand, destroy)};
#undef expand

// set/acquire functions
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
static int (*indirect_set[])(kmp_user_lock_p,
                             kmp_int32) = {KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
static int (*indirect_set_check[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, acquire)};
#undef expand

// unset/release and test functions
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock,
static int (*indirect_unset[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, release)};
static int (*indirect_test[])(kmp_user_lock_p,
                              kmp_int32) = {KMP_FOREACH_I_LOCK(expand, test)};
#undef expand
#define expand(l, op)                                                          \
  (int (*)(kmp_user_lock_p, kmp_int32)) __kmp_##op##_##l##_##lock_with_checks,
static int (*indirect_unset_check[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, release)};
static int (*indirect_test_check[])(kmp_user_lock_p, kmp_int32) = {
    KMP_FOREACH_I_LOCK(expand, test)};
#undef expand

// Exposes only one set of jump tables (*lock or *lock_with_checks).
void (*(*__kmp_indirect_destroy))(kmp_user_lock_p) = 0;
int (*(*__kmp_indirect_set))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_unset))(kmp_user_lock_p, kmp_int32) = 0;
int (*(*__kmp_indirect_test))(kmp_user_lock_p, kmp_int32) = 0;

// Lock index table.
kmp_indirect_lock_table_t __kmp_i_lock_table;

// Size of indirect locks.
static kmp_uint32 __kmp_indirect_lock_size[KMP_NUM_I_LOCKS] = {0};

// Jump tables for lock accessor/modifier functions.
void (*__kmp_indirect_set_location[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                     const ident_t *) = {0};
void (*__kmp_indirect_set_flags[KMP_NUM_I_LOCKS])(kmp_user_lock_p,
                                                  kmp_lock_flags_t) = {0};
const ident_t *(*__kmp_indirect_get_location[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p) = {0};
kmp_lock_flags_t (*__kmp_indirect_get_flags[KMP_NUM_I_LOCKS])(
    kmp_user_lock_p) = {0};

// Use different lock pools for different lock types.
static kmp_indirect_lock_t *__kmp_indirect_lock_pool[KMP_NUM_I_LOCKS] = {0};
// User lock allocator for dynamically dispatched indirect locks. Each entry of
// the indirect lock table holds the address and type of an allocated indirect
// lock (kmp_indirect_lock_t); the table doubles in size when it fills up, and
// destroyed locks are returned to a per-type pool for reuse.
kmp_indirect_lock_t *__kmp_allocate_indirect_lock(void **user_lock,
                                                  kmp_int32 gtid,
                                                  kmp_indirect_locktag_t tag) {
  kmp_indirect_lock_t *lck;
  kmp_lock_index_t idx;

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  if (__kmp_indirect_lock_pool[tag] != NULL) {
    // Reuse a previously allocated and destroyed lock object.
    lck = __kmp_indirect_lock_pool[tag];
    if (OMP_LOCK_T_SIZE < sizeof(void *))
      idx = lck->lock->pool.index;
    __kmp_indirect_lock_pool[tag] = (kmp_indirect_lock_t *)lck->lock->pool.next;
    KA_TRACE(20, ("__kmp_allocate_indirect_lock: reusing an existing lock %p\n",
                  lck));
  } else {
    idx = __kmp_i_lock_table.next;
    // Check capacity and double the size of the table if it is full.
    if (idx == __kmp_i_lock_table.size) {
      // Double up the space for block pointers.
      int row = __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK;
      kmp_indirect_lock_t **new_table = (kmp_indirect_lock_t **)__kmp_allocate(
          2 * row * sizeof(kmp_indirect_lock_t *));
      KMP_MEMCPY(new_table, __kmp_i_lock_table.table,
                 row * sizeof(kmp_indirect_lock_t *));
      kmp_indirect_lock_t **old_table = __kmp_i_lock_table.table;
      __kmp_i_lock_table.table = new_table;
      __kmp_free(old_table);
      // Allocate new objects in the new blocks.
      for (int i = row; i < 2 * row; ++i)
        *(__kmp_i_lock_table.table + i) = (kmp_indirect_lock_t *)__kmp_allocate(
            KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
      __kmp_i_lock_table.size = 2 * idx;
    }
    __kmp_i_lock_table.next++;
    lck = KMP_GET_I_LOCK(idx);
    // Allocate a new base lock object.
    lck->lock = (kmp_user_lock_p)__kmp_allocate(__kmp_indirect_lock_size[tag]);
    KA_TRACE(20,
             ("__kmp_allocate_indirect_lock: allocated a new lock %p\n", lck));
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);

  lck->type = tag;

  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    *((kmp_lock_index_t *)user_lock) = idx
                                       << 1; // indirect lock word must be even
  } else {
    *((kmp_indirect_lock_t **)user_lock) = lck;
  }

  return lck;
}
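
// Example of what ends up in the user's lock variable: when omp_lock_t is too
// small to hold a pointer (OMP_LOCK_T_SIZE < sizeof(void *)), the lock word
// stores the even value (idx << 1) and the lock is later located through
// __kmp_i_lock_table; otherwise the kmp_indirect_lock_t pointer is stored
// directly. Direct locks keep an odd tag in the lock word, so an even value
// unambiguously identifies an indirect lock.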
// User lock lookup for dynamically dispatched locks.
static __forceinline kmp_indirect_lock_t *
__kmp_lookup_indirect_lock(void **user_lock, const char *func) {
  if (__kmp_env_consistency_check) {
    kmp_indirect_lock_t *lck = NULL;
    if (user_lock == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
    if (OMP_LOCK_T_SIZE < sizeof(void *)) {
      kmp_lock_index_t idx = KMP_EXTRACT_I_INDEX(user_lock);
      if (idx >= __kmp_i_lock_table.size) {
        KMP_FATAL(LockIsUninitialized, func);
      }
      lck = KMP_GET_I_LOCK(idx);
    } else {
      lck = *((kmp_indirect_lock_t **)user_lock);
    }
    if (lck == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
    return lck;
  } else {
    if (OMP_LOCK_T_SIZE < sizeof(void *)) {
      return KMP_GET_I_LOCK(KMP_EXTRACT_I_INDEX(user_lock));
    } else {
      return *((kmp_indirect_lock_t **)user_lock);
    }
  }
}

static void __kmp_init_indirect_lock(kmp_dyna_lock_t *lock,
                                     kmp_dyna_lockseq_t seq) {
#if KMP_USE_ADAPTIVE_LOCKS
  if (seq == lockseq_adaptive && !__kmp_cpuinfo.rtm) {
    KMP_WARNING(AdaptiveNotSupported, "kmp_lockseq_t", "adaptive");
    seq = lockseq_queuing;
  }
#endif
#if KMP_USE_TSX
  if (seq == lockseq_rtm && !__kmp_cpuinfo.rtm) {
    seq = lockseq_queuing;
  }
#endif
  kmp_indirect_locktag_t tag = KMP_GET_I_TAG(seq);
  kmp_indirect_lock_t *l =
      __kmp_allocate_indirect_lock((void **)lock, __kmp_entry_gtid(), tag);
  KMP_I_LOCK_FUNC(l, init)(l->lock);
  KA_TRACE(
      20, ("__kmp_init_indirect_lock: initialized indirect lock with type#%d\n",
           seq));
}
static void __kmp_destroy_indirect_lock(kmp_dyna_lock_t *lock) {
  kmp_uint32 gtid = __kmp_entry_gtid();
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_destroy_lock");
  KMP_I_LOCK_FUNC(l, destroy)(l->lock);
  kmp_indirect_locktag_t tag = l->type;

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  // Use the base lock's space to keep the pool chain.
  l->lock->pool.next = (kmp_user_lock_p)__kmp_indirect_lock_pool[tag];
  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    l->lock->pool.index = KMP_EXTRACT_I_INDEX(lock);
  }
  __kmp_indirect_lock_pool[tag] = l;

  __kmp_release_lock(&__kmp_global_lock, gtid);
}

static int __kmp_set_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}

static int __kmp_unset_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
}

static int __kmp_test_indirect_lock(kmp_dyna_lock_t *lock, kmp_int32 gtid) {
  kmp_indirect_lock_t *l = KMP_LOOKUP_I_LOCK(lock);
  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}

static int __kmp_set_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                               kmp_int32 gtid) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_set_lock");
  return KMP_I_LOCK_FUNC(l, set)(l->lock, gtid);
}

static int __kmp_unset_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                 kmp_int32 gtid) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_unset_lock");
  return KMP_I_LOCK_FUNC(l, unset)(l->lock, gtid);
}

static int __kmp_test_indirect_lock_with_checks(kmp_dyna_lock_t *lock,
                                                kmp_int32 gtid) {
  kmp_indirect_lock_t *l =
      __kmp_lookup_indirect_lock((void **)lock, "omp_test_lock");
  return KMP_I_LOCK_FUNC(l, test)(l->lock, gtid);
}
kmp_dyna_lockseq_t __kmp_user_lock_seq = lockseq_queuing;

// Map a lock sequence to the owner query of the underlying lock type.
kmp_int32 __kmp_get_user_lock_owner(kmp_user_lock_p lck, kmp_uint32 seq) {
  switch (seq) {
  case lockseq_tas:
  case lockseq_nested_tas:
    return __kmp_get_tas_lock_owner((kmp_tas_lock_t *)lck);
#if KMP_USE_FUTEX
  case lockseq_futex:
  case lockseq_nested_futex:
    return __kmp_get_futex_lock_owner((kmp_futex_lock_t *)lck);
#endif
  case lockseq_ticket:
  case lockseq_nested_ticket:
    return __kmp_get_ticket_lock_owner((kmp_ticket_lock_t *)lck);
  case lockseq_queuing:
  case lockseq_nested_queuing:
#if KMP_USE_ADAPTIVE_LOCKS
  case lockseq_adaptive:
#endif
    return __kmp_get_queuing_lock_owner((kmp_queuing_lock_t *)lck);
  case lockseq_drdpa:
  case lockseq_nested_drdpa:
    return __kmp_get_drdpa_lock_owner((kmp_drdpa_lock_t *)lck);
  default:
    return 0;
  }
}
// Initializes data for dynamic user locks.
void __kmp_init_dynamic_user_locks() {
  // Initialize the jump tables, picking the checked or unchecked variants.
  if (__kmp_env_consistency_check) {
    __kmp_direct_set = direct_set_check;
    __kmp_direct_unset = direct_unset_check;
    __kmp_direct_test = direct_test_check;
    __kmp_direct_destroy = direct_destroy_check;
    __kmp_indirect_set = indirect_set_check;
    __kmp_indirect_unset = indirect_unset_check;
    __kmp_indirect_test = indirect_test_check;
    __kmp_indirect_destroy = indirect_destroy_check;
  } else {
    __kmp_direct_set = direct_set;
    __kmp_direct_unset = direct_unset;
    __kmp_direct_test = direct_test;
    __kmp_direct_destroy = direct_destroy;
    __kmp_indirect_set = indirect_set;
    __kmp_indirect_unset = indirect_unset;
    __kmp_indirect_test = indirect_test;
    __kmp_indirect_destroy = indirect_destroy;
  }
  // If the user locks have already been initialized, return. This allows
  // switching the consistency-check mode without reallocating the tables.
  if (__kmp_init_user_locks)
    return;

  // Initialize the lock index table.
  __kmp_i_lock_table.size = KMP_I_LOCK_CHUNK;
  __kmp_i_lock_table.table =
      (kmp_indirect_lock_t **)__kmp_allocate(sizeof(kmp_indirect_lock_t *));
  *(__kmp_i_lock_table.table) = (kmp_indirect_lock_t *)__kmp_allocate(
      KMP_I_LOCK_CHUNK * sizeof(kmp_indirect_lock_t));
  __kmp_i_lock_table.next = 0;
  // Indirect lock sizes.
  __kmp_indirect_lock_size[locktag_ticket] = sizeof(kmp_ticket_lock_t);
  __kmp_indirect_lock_size[locktag_queuing] = sizeof(kmp_queuing_lock_t);
#if KMP_USE_ADAPTIVE_LOCKS
  __kmp_indirect_lock_size[locktag_adaptive] = sizeof(kmp_adaptive_lock_t);
#endif
  __kmp_indirect_lock_size[locktag_drdpa] = sizeof(kmp_drdpa_lock_t);
#if KMP_USE_TSX
  __kmp_indirect_lock_size[locktag_rtm] = sizeof(kmp_queuing_lock_t);
#endif
  __kmp_indirect_lock_size[locktag_nested_tas] = sizeof(kmp_tas_lock_t);
#if KMP_USE_FUTEX
  __kmp_indirect_lock_size[locktag_nested_futex] = sizeof(kmp_futex_lock_t);
#endif
  __kmp_indirect_lock_size[locktag_nested_ticket] = sizeof(kmp_ticket_lock_t);
  __kmp_indirect_lock_size[locktag_nested_queuing] = sizeof(kmp_queuing_lock_t);
  __kmp_indirect_lock_size[locktag_nested_drdpa] = sizeof(kmp_drdpa_lock_t);
  // Initialize the lock accessor/modifier jump tables.
#define fill_jumps(table, expand, sep)                                         \
  {                                                                            \
    table[locktag##sep##ticket] = expand(ticket);                              \
    table[locktag##sep##queuing] = expand(queuing);                            \
    table[locktag##sep##drdpa] = expand(drdpa);                                \
  }

#if KMP_USE_ADAPTIVE_LOCKS
#define fill_table(table, expand)                                              \
  {                                                                            \
    fill_jumps(table, expand, _);                                              \
    table[locktag_adaptive] = expand(queuing);                                 \
    fill_jumps(table, expand, _nested_);                                       \
  }
#else
#define fill_table(table, expand)                                              \
  {                                                                            \
    fill_jumps(table, expand, _);                                              \
    fill_jumps(table, expand, _nested_);                                       \
  }
#endif // KMP_USE_ADAPTIVE_LOCKS

#define expand(l)                                                              \
  (void (*)(kmp_user_lock_p, const ident_t *)) __kmp_set_##l##_lock_location
  fill_table(__kmp_indirect_set_location, expand);
#undef expand
#define expand(l)                                                              \
  (void (*)(kmp_user_lock_p, kmp_lock_flags_t)) __kmp_set_##l##_lock_flags
  fill_table(__kmp_indirect_set_flags, expand);
#undef expand
#define expand(l)                                                              \
  (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_location
  fill_table(__kmp_indirect_get_location, expand);
#undef expand
#define expand(l)                                                              \
  (kmp_lock_flags_t(*)(kmp_user_lock_p)) __kmp_get_##l##_lock_flags
  fill_table(__kmp_indirect_get_flags, expand);
#undef expand

  __kmp_init_user_locks = TRUE;
}
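
// For illustration, fill_table(__kmp_indirect_get_location, expand) with the
// corresponding expand above assigns, for each lock kind, entries such as
//   __kmp_indirect_get_location[locktag_ticket] =
//       (const ident_t *(*)(kmp_user_lock_p)) __kmp_get_ticket_lock_location;
// so the accessor tables mirror the set/unset/test jump tables defined earlier.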
// Clean up the lock table.
void __kmp_cleanup_indirect_user_locks() {
  kmp_lock_index_t i;
  int k;

  // Clean up locks in the pools first (they were already destroyed before
  // going into the pools).
  for (k = 0; k < KMP_NUM_I_LOCKS; ++k) {
    kmp_indirect_lock_t *l = __kmp_indirect_lock_pool[k];
    while (l != NULL) {
      kmp_indirect_lock_t *ll = l;
      l = (kmp_indirect_lock_t *)l->lock->pool.next;
      KA_TRACE(20, ("__kmp_cleanup_indirect_user_locks: freeing %p from pool\n",
                    ll));
      __kmp_free(ll->lock);
      ll->lock = NULL;
    }
    __kmp_indirect_lock_pool[k] = NULL;
  }
  // Clean up the remaining undestroyed locks.
  for (i = 0; i < __kmp_i_lock_table.next; i++) {
    kmp_indirect_lock_t *l = KMP_GET_I_LOCK(i);
    if (l->lock != NULL) {
      // Locks not destroyed explicitly need to be destroyed here.
      KMP_I_LOCK_FUNC(l, destroy)(l->lock);
      KA_TRACE(
          20,
          ("__kmp_cleanup_indirect_user_locks: destroy/freeing %p from table\n",
           l));
      __kmp_free(l->lock);
    }
  }
  // Free the table.
  for (i = 0; i < __kmp_i_lock_table.size / KMP_I_LOCK_CHUNK; i++)
    __kmp_free(__kmp_i_lock_table.table[i]);
  __kmp_free(__kmp_i_lock_table.table);

  __kmp_init_user_locks = FALSE;
}
enum kmp_lock_kind __kmp_user_lock_kind = lk_default;
int __kmp_num_locks_in_block = 1;

#else // KMP_USE_DYNAMIC_LOCK

// The *_with_checks initializers simply delegate to the plain initializers.
static void __kmp_init_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  __kmp_init_tas_lock(lck);
}

static void __kmp_init_nested_tas_lock_with_checks(kmp_tas_lock_t *lck) {
  __kmp_init_nested_tas_lock(lck);
}

#if KMP_USE_FUTEX
static void __kmp_init_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  __kmp_init_futex_lock(lck);
}

static void __kmp_init_nested_futex_lock_with_checks(kmp_futex_lock_t *lck) {
  __kmp_init_nested_futex_lock(lck);
}
#endif
static int __kmp_is_ticket_lock_initialized(kmp_ticket_lock_t *lck) {
  return lck == lck->lk.self;
}

static void __kmp_init_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  __kmp_init_ticket_lock(lck);
}

static void __kmp_init_nested_ticket_lock_with_checks(kmp_ticket_lock_t *lck) {
  __kmp_init_nested_ticket_lock(lck);
}

static int __kmp_is_queuing_lock_initialized(kmp_queuing_lock_t *lck) {
  return lck == lck->lk.initialized;
}

static void __kmp_init_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  __kmp_init_queuing_lock(lck);
}

static void
__kmp_init_nested_queuing_lock_with_checks(kmp_queuing_lock_t *lck) {
  __kmp_init_nested_queuing_lock(lck);
}

#if KMP_USE_ADAPTIVE_LOCKS
static void __kmp_init_adaptive_lock_with_checks(kmp_adaptive_lock_t *lck) {
  __kmp_init_adaptive_lock(lck);
}
#endif

static int __kmp_is_drdpa_lock_initialized(kmp_drdpa_lock_t *lck) {
  return lck == lck->lk.initialized;
}

static void __kmp_init_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  __kmp_init_drdpa_lock(lck);
}

static void __kmp_init_nested_drdpa_lock_with_checks(kmp_drdpa_lock_t *lck) {
  __kmp_init_nested_drdpa_lock(lck);
}
// User locks are implemented as a table of function pointers, which are set to
// the lock functions of the appropriate kind once the lock kind is known (see
// __kmp_set_user_lock_vptrs below).

enum kmp_lock_kind __kmp_user_lock_kind = lk_default;

size_t __kmp_base_user_lock_size = 0;
size_t __kmp_user_lock_size = 0;

kmp_int32 (*__kmp_get_user_lock_owner_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_acquire_user_lock_with_checks_)(kmp_user_lock_p lck,
                                            kmp_int32 gtid) = NULL;

int (*__kmp_test_user_lock_with_checks_)(kmp_user_lock_p lck,
                                         kmp_int32 gtid) = NULL;
int (*__kmp_release_user_lock_with_checks_)(kmp_user_lock_p lck,
                                            kmp_int32 gtid) = NULL;
void (*__kmp_init_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_user_lock_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
int (*__kmp_acquire_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) = NULL;

int (*__kmp_test_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                kmp_int32 gtid) = NULL;
int (*__kmp_release_nested_user_lock_with_checks_)(kmp_user_lock_p lck,
                                                   kmp_int32 gtid) = NULL;
void (*__kmp_init_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_destroy_nested_user_lock_with_checks_)(kmp_user_lock_p lck) = NULL;

int (*__kmp_is_user_lock_initialized_)(kmp_user_lock_p lck) = NULL;
const ident_t *(*__kmp_get_user_lock_location_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_set_user_lock_location_)(kmp_user_lock_p lck,
                                      const ident_t *loc) = NULL;
kmp_lock_flags_t (*__kmp_get_user_lock_flags_)(kmp_user_lock_p lck) = NULL;
void (*__kmp_set_user_lock_flags_)(kmp_user_lock_p lck,
                                   kmp_lock_flags_t flags) = NULL;
void __kmp_set_user_lock_vptrs(kmp_lock_kind_t user_lock_kind) {
  switch (user_lock_kind) {
  case lk_tas: {
    __kmp_base_user_lock_size = sizeof(kmp_base_tas_lock_t);
    __kmp_user_lock_size = sizeof(kmp_tas_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_tas_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(tas);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(tas);
    } else {
      KMP_BIND_USER_LOCK(tas);
      KMP_BIND_NESTED_USER_LOCK(tas);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_tas_lock);

    // The TAS lock carries no location/flags fields, so these accessors stay
    // NULL.
    __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;

    __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_location_ =
        (void (*)(kmp_user_lock_p, const ident_t *))NULL;

    __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_flags_ =
        (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
  } break;
#if KMP_USE_FUTEX
  case lk_futex: {
    __kmp_base_user_lock_size = sizeof(kmp_base_futex_lock_t);
    __kmp_user_lock_size = sizeof(kmp_futex_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_futex_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(futex);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(futex);
    } else {
      KMP_BIND_USER_LOCK(futex);
      KMP_BIND_NESTED_USER_LOCK(futex);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_futex_lock);

    __kmp_is_user_lock_initialized_ = (int (*)(kmp_user_lock_p))NULL;

    __kmp_get_user_lock_location_ = (const ident_t *(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_location_ =
        (void (*)(kmp_user_lock_p, const ident_t *))NULL;

    __kmp_get_user_lock_flags_ = (kmp_lock_flags_t(*)(kmp_user_lock_p))NULL;

    __kmp_set_user_lock_flags_ =
        (void (*)(kmp_user_lock_p, kmp_lock_flags_t))NULL;
  } break;
#endif // KMP_USE_FUTEX

  case lk_ticket: {
    __kmp_base_user_lock_size = sizeof(kmp_base_ticket_lock_t);
    __kmp_user_lock_size = sizeof(kmp_ticket_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(ticket);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(ticket);
    } else {
      KMP_BIND_USER_LOCK(ticket);
      KMP_BIND_NESTED_USER_LOCK(ticket);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_ticket_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_ticket_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_ticket_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_ticket_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_ticket_lock_flags);
  } break;
  case lk_queuing: {
    __kmp_base_user_lock_size = sizeof(kmp_base_queuing_lock_t);
    __kmp_user_lock_size = sizeof(kmp_queuing_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(queuing);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(queuing);
    } else {
      KMP_BIND_USER_LOCK(queuing);
      KMP_BIND_NESTED_USER_LOCK(queuing);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_queuing_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_queuing_lock_flags);
  } break;
#if KMP_USE_ADAPTIVE_LOCKS
  case lk_adaptive: {
    __kmp_base_user_lock_size = sizeof(kmp_base_adaptive_lock_t);
    __kmp_user_lock_size = sizeof(kmp_adaptive_lock_t);

    // The adaptive lock reuses the queuing lock's accessors.
    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(adaptive);
    } else {
      KMP_BIND_USER_LOCK(adaptive);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_adaptive_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_queuing_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_queuing_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_queuing_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_queuing_lock_flags);
  } break;
#endif // KMP_USE_ADAPTIVE_LOCKS

  case lk_drdpa: {
    __kmp_base_user_lock_size = sizeof(kmp_base_drdpa_lock_t);
    __kmp_user_lock_size = sizeof(kmp_drdpa_lock_t);

    __kmp_get_user_lock_owner_ =
        (kmp_int32(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_owner);

    if (__kmp_env_consistency_check) {
      KMP_BIND_USER_LOCK_WITH_CHECKS(drdpa);
      KMP_BIND_NESTED_USER_LOCK_WITH_CHECKS(drdpa);
    } else {
      KMP_BIND_USER_LOCK(drdpa);
      KMP_BIND_NESTED_USER_LOCK(drdpa);
    }

    __kmp_destroy_user_lock_ =
        (void (*)(kmp_user_lock_p))(&__kmp_destroy_drdpa_lock);

    __kmp_is_user_lock_initialized_ =
        (int (*)(kmp_user_lock_p))(&__kmp_is_drdpa_lock_initialized);

    __kmp_get_user_lock_location_ =
        (const ident_t *(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_location);

    __kmp_set_user_lock_location_ = (void (*)(
        kmp_user_lock_p, const ident_t *))(&__kmp_set_drdpa_lock_location);

    __kmp_get_user_lock_flags_ =
        (kmp_lock_flags_t(*)(kmp_user_lock_p))(&__kmp_get_drdpa_lock_flags);

    __kmp_set_user_lock_flags_ = (void (*)(kmp_user_lock_p, kmp_lock_flags_t))(
        &__kmp_set_drdpa_lock_flags);
  } break;
  }
}
kmp_lock_table_t __kmp_user_lock_table = {1, 0, NULL};
kmp_user_lock_p __kmp_lock_pool = NULL;

kmp_block_of_locks *__kmp_lock_blocks = NULL;
int __kmp_num_locks_in_block = 1;
static kmp_lock_index_t __kmp_lock_table_insert(kmp_user_lock_p lck) {
  // Assume that kmp_global_lock is held upon entry/exit.
  kmp_lock_index_t index;
  if (__kmp_user_lock_table.used >= __kmp_user_lock_table.allocated) {
    kmp_lock_index_t size;
    kmp_user_lock_p *table;
    // Reallocate lock table.
    if (__kmp_user_lock_table.allocated == 0) {
      size = 1024;
    } else {
      size = __kmp_user_lock_table.allocated * 2;
    }
    table = (kmp_user_lock_p *)__kmp_allocate(sizeof(kmp_user_lock_p) * size);
    KMP_MEMCPY(table + 1, __kmp_user_lock_table.table + 1,
               sizeof(kmp_user_lock_p) * (__kmp_user_lock_table.used - 1));
    table[0] = (kmp_user_lock_p)__kmp_user_lock_table.table;
    // The old table may still be in use by other threads, so it is not freed
    // here; its pointer is kept in slot 0 of the new table and the whole chain
    // of old tables is freed at library shutdown.
    __kmp_user_lock_table.table = table;
    __kmp_user_lock_table.allocated = size;
  }
  KMP_DEBUG_ASSERT(__kmp_user_lock_table.used <
                   __kmp_user_lock_table.allocated);
  index = __kmp_user_lock_table.used;
  __kmp_user_lock_table.table[index] = lck;
  ++__kmp_user_lock_table.used;
  return index;
}
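
// Growth example: each time the table fills, it is replaced by one twice the
// size, and slot 0 of the new table stores the pointer to the table it
// replaced. After two growths the chain is newest -> previous -> original,
// which is exactly the chain __kmp_cleanup_user_locks() walks (via
// table_ptr[0]) when it frees the tables.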
static kmp_user_lock_p __kmp_lock_block_allocate() {
  // Assume that kmp_global_lock is held upon entry/exit.
  static int last_index = 0;
  if ((last_index >= __kmp_num_locks_in_block) || (__kmp_lock_blocks == NULL)) {
    // Restart the index and allocate space for the locks plus the block
    // chaining header that lives just past them.
    last_index = 0;
    KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
    size_t space_for_locks = __kmp_user_lock_size * __kmp_num_locks_in_block;
    char *buffer =
        (char *)__kmp_allocate(space_for_locks + sizeof(kmp_block_of_locks));
    // Set up the new block.
    kmp_block_of_locks *new_block =
        (kmp_block_of_locks *)(&buffer[space_for_locks]);
    new_block->next_block = __kmp_lock_blocks;
    new_block->locks = (void *)buffer;
    __kmp_lock_blocks = new_block;
  }
  kmp_user_lock_p ret = (kmp_user_lock_p)(&(
      ((char *)(__kmp_lock_blocks->locks))[last_index * __kmp_user_lock_size]));
  last_index++;
  return ret;
}
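
// Design note: with __kmp_num_locks_in_block > 1, lock storage is carved out
// of one allocation of __kmp_num_locks_in_block * __kmp_user_lock_size bytes,
// and the kmp_block_of_locks header is placed immediately after that storage
// (note the &buffer[space_for_locks] above). With the default value of 1,
// __kmp_user_lock_allocate() below simply calls __kmp_allocate() per lock.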
// Get memory for a lock. It may be freshly allocated memory or reused memory
// from the lock pool.
kmp_user_lock_p __kmp_user_lock_allocate(void **user_lock, kmp_int32 gtid,
                                         kmp_lock_flags_t flags) {
  kmp_user_lock_p lck;
  kmp_lock_index_t index;
  KMP_DEBUG_ASSERT(user_lock);

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  if (__kmp_lock_pool == NULL) {
    // Lock pool is empty: allocate new memory, either one lock at a time or
    // from a block, and register it in the lock table.
    ANNOTATE_IGNORE_WRITES_BEGIN();
    if (__kmp_num_locks_in_block <= 1) {
      lck = (kmp_user_lock_p)__kmp_allocate(__kmp_user_lock_size);
    } else {
      lck = __kmp_lock_block_allocate();
    }
    ANNOTATE_IGNORE_WRITES_END();

    // Insert lock in the table so that it can be freed in __kmp_cleanup,
    // and so the debugger has info on all allocated locks.
    index = __kmp_lock_table_insert(lck);
  } else {
    // Pick up a lock from the pool.
    lck = __kmp_lock_pool;
    index = __kmp_lock_pool->pool.index;
    __kmp_lock_pool = __kmp_lock_pool->pool.next;
  }

  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    *((kmp_lock_index_t *)user_lock) = index;
  } else {
    *((kmp_user_lock_p *)user_lock) = lck;
  }

  // Mark the lock if it is a critical section lock.
  __kmp_set_user_lock_flags(lck, flags);

  __kmp_release_lock(&__kmp_global_lock, gtid);

  return lck;
}
// Put the lock's memory into the pool for reuse.
void __kmp_user_lock_free(void **user_lock, kmp_int32 gtid,
                          kmp_user_lock_p lck) {
  KMP_DEBUG_ASSERT(user_lock != NULL);
  KMP_DEBUG_ASSERT(lck != NULL);

  __kmp_acquire_lock(&__kmp_global_lock, gtid);

  lck->pool.next = __kmp_lock_pool;
  __kmp_lock_pool = lck;
  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
    KMP_DEBUG_ASSERT(0 < index && index <= __kmp_user_lock_table.used);
    lck->pool.index = index;
  }

  __kmp_release_lock(&__kmp_global_lock, gtid);
}
kmp_user_lock_p __kmp_lookup_user_lock(void **user_lock, char const *func) {
  kmp_user_lock_p lck = NULL;

  if (__kmp_env_consistency_check) {
    if (user_lock == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
  }

  if (OMP_LOCK_T_SIZE < sizeof(void *)) {
    kmp_lock_index_t index = *((kmp_lock_index_t *)user_lock);
    if (__kmp_env_consistency_check) {
      if (!(0 < index && index < __kmp_user_lock_table.used)) {
        KMP_FATAL(LockIsUninitialized, func);
      }
    }
    KMP_DEBUG_ASSERT(0 < index && index < __kmp_user_lock_table.used);
    KMP_DEBUG_ASSERT(__kmp_user_lock_size > 0);
    lck = __kmp_user_lock_table.table[index];
  } else {
    lck = *((kmp_user_lock_p *)user_lock);
  }

  if (__kmp_env_consistency_check) {
    if (lck == NULL) {
      KMP_FATAL(LockIsUninitialized, func);
    }
  }

  return lck;
}
void __kmp_cleanup_user_locks(void) {
  // Reset the lock pool. Locks in the pool cannot be held by any thread at
  // this point; the ones still registered in the table are handled below.
  __kmp_lock_pool = NULL;

#define IS_CRITICAL(lck)                                                       \
  ((__kmp_get_user_lock_flags_ != NULL) &&                                     \
   ((*__kmp_get_user_lock_flags_)(lck)&kmp_lf_critical_section))

  while (__kmp_user_lock_table.used > 1) {
    const ident_t *loc;
    kmp_user_lock_p lck =
        __kmp_user_lock_table.table[--__kmp_user_lock_table.used];

    if ((__kmp_is_user_lock_initialized_ != NULL) &&
        (*__kmp_is_user_lock_initialized_)(lck)) {
      // Issue a warning for locks the program never destroyed, except for
      // critical-section locks, which the user is not responsible for.
      if (__kmp_env_consistency_check && (!IS_CRITICAL(lck)) &&
          ((loc = __kmp_get_user_lock_location(lck)) != NULL) &&
          (loc->psource != NULL)) {
        kmp_str_loc_t str_loc = __kmp_str_loc_init(loc->psource, 0);
        KMP_WARNING(CnsLockNotDestroyed, str_loc.file, str_loc.line);
        __kmp_str_loc_free(&str_loc);
      }

      if (IS_CRITICAL(lck)) {
        KA_TRACE(
            20,
            ("__kmp_cleanup_user_locks: free critical section lock %p (%p)\n",
             lck, *(void **)lck));
      } else {
        KA_TRACE(20, ("__kmp_cleanup_user_locks: free lock %p (%p)\n", lck,
                      *(void **)lck));
      }

      // Destroy the lock if it is initialized.
      __kmp_destroy_user_lock(lck);
    }

    // Free the lock storage only when block allocation is not in use;
    // block-allocated locks are released with their block below.
    if (__kmp_lock_blocks == NULL) {
      __kmp_free(lck);
    }
  }

  // Delete the lock table(s). Slot 0 of each table holds the pointer to the
  // previous, smaller table, so following it releases the whole chain.
  kmp_user_lock_p *table_ptr = __kmp_user_lock_table.table;
  __kmp_user_lock_table.table = NULL;
  __kmp_user_lock_table.allocated = 0;

  while (table_ptr != NULL) {
    kmp_user_lock_p *next = (kmp_user_lock_p *)(table_ptr[0]);
    __kmp_free(table_ptr);
    table_ptr = next;
  }

  // Free the buffers allocated for blocks of locks.
  kmp_block_of_locks_t *block_ptr = __kmp_lock_blocks;
  __kmp_lock_blocks = NULL;

  while (block_ptr != NULL) {
    kmp_block_of_locks_t *next = block_ptr->next_block;
    __kmp_free(block_ptr->locks);
    // The block header itself was allocated at the end of the locks buffer, so
    // freeing block_ptr->locks releases it too.
    block_ptr = next;
  }

  TCW_4(__kmp_init_user_locks, FALSE);
}
#endif // KMP_USE_DYNAMIC_LOCK