LLVM OpenMP* Runtime Library
kmp_wait_release.h
1 /*
2  * kmp_wait_release.h -- Wait/Release implementation
3  */
4 
5 
6 //===----------------------------------------------------------------------===//
7 //
8 // The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 
16 #ifndef KMP_WAIT_RELEASE_H
17 #define KMP_WAIT_RELEASE_H
18 
19 #include "kmp.h"
20 #include "kmp_itt.h"
21 
38 enum flag_type {
39     flag32,      // 32-bit flags
40     flag64,      // 64-bit flags
41     flag_oncore  // special 64-bit flag for on-core (hierarchical) barrier
42 };
43 
47 template <typename P>
48 class kmp_flag {
49  volatile P * loc;  // Pointer to the flag location modified by another thread
50  flag_type t;       // "Type" of the flag (flag32, flag64, or flag_oncore)
51  public:
52  typedef P flag_t;
53  kmp_flag(volatile P *p, flag_type ft) : loc(p), t(ft) {}
57  volatile P * get() { return loc; }
61  void set(volatile P *new_loc) { loc = new_loc; }
65  flag_type get_type() { return t; }
66  // Derived classes must provide the following:
67  /*
68  kmp_info_t * get_waiter(kmp_uint32 i);
69  kmp_uint32 get_num_waiters();
70  bool done_check();
71  bool done_check_val(P old_loc);
72  bool notdone_check();
73  void internal_release();
74  void suspend(int th_gtid);
75  void resume(int th_gtid);
76  P set_sleeping();
77  P unset_sleeping();
78  bool is_sleeping();
79  bool is_any_sleeping();
80  bool is_sleeping_val(P old_loc);
81  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin, int *thread_finished
82  USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained);
83  */
84 };
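The base class only records where the flag lives and what kind of flag it is; the policy operations listed in the comment are supplied by each derived class and resolved statically, since __kmp_wait_template and __kmp_release_template below are templated on the concrete flag class rather than using virtual dispatch. A minimal standalone C++ sketch of that shape (the names here are illustrative, not the runtime's):

#include <cstdint>

// Illustrative only: a base that stores the flag location, and a derived
// class that supplies the completion test, consumed by a templated driver.
template <typename P>
class flag_base {
    volatile P *loc;
 public:
    explicit flag_base(volatile P *p) : loc(p) {}
    volatile P *get() { return loc; }
};

class flag_eq : public flag_base<uint64_t> {
    uint64_t checker;
 public:
    flag_eq(volatile uint64_t *p, uint64_t c) : flag_base<uint64_t>(p), checker(c) {}
    bool done_check() { return *get() == checker; }  // policy supplied by the derived class
};

template <class C>
void wait_driver(C *flag) {  // analogue of __kmp_wait_template<C>
    while (!flag->done_check()) { /* spin, yield, then sleep */ }
}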
85 
86 /* Spin wait loop that first does pause, then yield, then sleep. A thread that calls __kmp_wait_*
87  must make certain that another thread calls __kmp_release to wake it back up to prevent deadlocks! */
88 template <class C>
89 static inline void
90 __kmp_wait_template(kmp_info_t *this_thr, C *flag, int final_spin
91  USE_ITT_BUILD_ARG(void * itt_sync_obj) )
92 {
93  // NOTE: We may not belong to a team at this point.
94  volatile typename C::flag_t *spin = flag->get();
95  kmp_uint32 spins;
96  kmp_uint32 hibernate;
97  int th_gtid;
98  int tasks_completed = FALSE;
99 
100  KMP_FSYNC_SPIN_INIT(spin, NULL);
101  if (flag->done_check()) {
102  KMP_FSYNC_SPIN_ACQUIRED(spin);
103  return;
104  }
105  th_gtid = this_thr->th.th_info.ds.ds_gtid;
106  KA_TRACE(20, ("__kmp_wait_sleep: T#%d waiting for flag(%p)\n", th_gtid, flag));
107 
108 #if OMPT_SUPPORT && OMPT_BLAME
109  ompt_state_t ompt_state = this_thr->th.ompt_thread_info.state;
110  if (ompt_enabled &&
111  ompt_state != ompt_state_undefined) {
112  if (ompt_state == ompt_state_idle) {
113  if (ompt_callbacks.ompt_callback(ompt_event_idle_begin)) {
114  ompt_callbacks.ompt_callback(ompt_event_idle_begin)(th_gtid + 1);
115  }
116  } else if (ompt_callbacks.ompt_callback(ompt_event_wait_barrier_begin)) {
117  KMP_DEBUG_ASSERT(ompt_state == ompt_state_wait_barrier ||
118  ompt_state == ompt_state_wait_barrier_implicit ||
119  ompt_state == ompt_state_wait_barrier_explicit);
120 
121  ompt_lw_taskteam_t* team = this_thr->th.th_team->t.ompt_serialized_team_info;
122  ompt_parallel_id_t pId;
123  ompt_task_id_t tId;
124  if (team){
125  pId = team->ompt_team_info.parallel_id;
126  tId = team->ompt_task_info.task_id;
127  } else {
128  pId = this_thr->th.th_team->t.ompt_team_info.parallel_id;
129  tId = this_thr->th.th_current_task->ompt_task_info.task_id;
130  }
131  ompt_callbacks.ompt_callback(ompt_event_wait_barrier_begin)(pId, tId);
132  }
133  }
134 #endif
135 
136  // Setup for waiting
137  KMP_INIT_YIELD(spins);
138 
139  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
140  // The worker threads cannot rely on the team struct existing at this point.
141  // Use the bt values cached in the thread struct instead.
142 #ifdef KMP_ADJUST_BLOCKTIME
143  if (__kmp_zero_bt && !this_thr->th.th_team_bt_set)
144  // Force immediate suspend if not set by user and more threads than available procs
145  hibernate = 0;
146  else
147  hibernate = this_thr->th.th_team_bt_intervals;
148 #else
149  hibernate = this_thr->th.th_team_bt_intervals;
150 #endif /* KMP_ADJUST_BLOCKTIME */
151 
152  /* If the blocktime is nonzero, we want to make sure that we spin wait for the entirety
153  of the specified #intervals, plus up to one interval more. This increment makes
154  certain that this thread doesn't go to sleep too soon. */
155  if (hibernate != 0)
156  hibernate++;
157 
158  // Add in the current time value.
159  hibernate += TCR_4(__kmp_global.g.g_time.dt.t_value);
160  KF_TRACE(20, ("__kmp_wait_sleep: T#%d now=%d, hibernate=%d, intervals=%d\n",
161  th_gtid, __kmp_global.g.g_time.dt.t_value, hibernate,
162  hibernate - __kmp_global.g.g_time.dt.t_value));
163  }
164 
165  KMP_MB();
166 
167  // Main wait spin loop
168  while (flag->notdone_check()) {
169  int in_pool;
170 
171  /* If the task team is NULL, it means one of these things:
172  1) A newly-created thread is first being released by __kmp_fork_barrier(), and
173  its task team has not been set up yet.
174  2) All tasks have been executed to completion; this thread has decremented the task
175  team's reference count and possibly deallocated it, and should no longer reference it.
176  3) Tasking is off for this region. This could be because we are in a serialized region
177  (perhaps the outer one), or else tasking was manually disabled (KMP_TASKING=0). */
178  kmp_task_team_t * task_team = NULL;
179  if (__kmp_tasking_mode != tskm_immediate_exec) {
180  task_team = this_thr->th.th_task_team;
181  if (task_team != NULL) {
182  if (TCR_SYNC_4(task_team->tt.tt_active)) {
183  if (KMP_TASKING_ENABLED(task_team))
184  flag->execute_tasks(this_thr, th_gtid, final_spin, &tasks_completed
185  USE_ITT_BUILD_ARG(itt_sync_obj), 0);
186  }
187  else {
188  KMP_DEBUG_ASSERT(!KMP_MASTER_TID(this_thr->th.th_info.ds.ds_tid));
189  this_thr->th.th_task_team = NULL;
190  }
191  } // if
192  } // if
193 
194  KMP_FSYNC_SPIN_PREPARE(spin);
195  if (TCR_4(__kmp_global.g.g_done)) {
196  if (__kmp_global.g.g_abort)
197  __kmp_abort_thread();
198  break;
199  }
200 
201  // If we are oversubscribed, or have waited a bit (and KMP_LIBRARY=throughput), then yield
202  KMP_YIELD(TCR_4(__kmp_nth) > __kmp_avail_proc);
203  // TODO: Should it be number of cores instead of thread contexts? Like:
204  // KMP_YIELD(TCR_4(__kmp_nth) > __kmp_ncores);
205  // Need performance improvement data to make the change...
206  KMP_YIELD_SPIN(spins);
207 
208  // Check if this thread was transferred from a team
209  // to the thread pool (or vice-versa) while spinning.
210  in_pool = !!TCR_4(this_thr->th.th_in_pool);
211  if (in_pool != !!this_thr->th.th_active_in_pool) {
212  if (in_pool) { // Recently transferred from team to pool
213  KMP_TEST_THEN_INC32((kmp_int32 *)&__kmp_thread_pool_active_nth);
214  this_thr->th.th_active_in_pool = TRUE;
215  /* Here, we cannot assert that:
216  KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) <= __kmp_thread_pool_nth);
217  __kmp_thread_pool_nth is inc/dec'd by the master thread while the fork/join
218  lock is held, whereas __kmp_thread_pool_active_nth is inc/dec'd asynchronously
219  by the workers. The two can get out of sync for brief periods of time. */
220  }
221  else { // Recently transferred from pool to team
222  KMP_TEST_THEN_DEC32((kmp_int32 *) &__kmp_thread_pool_active_nth);
223  KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
224  this_thr->th.th_active_in_pool = FALSE;
225  }
226  }
227 
228  // Don't suspend if KMP_BLOCKTIME is set to "infinite"
229  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME)
230  continue;
231 
232  // Don't suspend if there is a likelihood of new tasks being spawned.
233  if ((task_team != NULL) && TCR_4(task_team->tt.tt_found_tasks))
234  continue;
235 
236  // If we have waited a bit more, fall asleep
237  if (TCR_4(__kmp_global.g.g_time.dt.t_value) < hibernate)
238  continue;
239 
240  KF_TRACE(50, ("__kmp_wait_sleep: T#%d suspend time reached\n", th_gtid));
241 
242  flag->suspend(th_gtid);
243 
244  if (TCR_4(__kmp_global.g.g_done)) {
245  if (__kmp_global.g.g_abort)
246  __kmp_abort_thread();
247  break;
248  }
249  // TODO: If thread is done with work and times out, disband/free
250  }
251 
252 #if OMPT_SUPPORT && OMPT_BLAME
253  if (ompt_enabled &&
254  ompt_state != ompt_state_undefined) {
255  if (ompt_state == ompt_state_idle) {
256  if (ompt_callbacks.ompt_callback(ompt_event_idle_end)) {
257  ompt_callbacks.ompt_callback(ompt_event_idle_end)(th_gtid + 1);
258  }
259  } else if (ompt_callbacks.ompt_callback(ompt_event_wait_barrier_end)) {
260  KMP_DEBUG_ASSERT(ompt_state == ompt_state_wait_barrier ||
261  ompt_state == ompt_state_wait_barrier_implicit ||
262  ompt_state == ompt_state_wait_barrier_explicit);
263 
264  ompt_lw_taskteam_t* team = this_thr->th.th_team->t.ompt_serialized_team_info;
265  ompt_parallel_id_t pId;
266  ompt_task_id_t tId;
267  if (team){
268  pId = team->ompt_team_info.parallel_id;
269  tId = team->ompt_task_info.task_id;
270  } else {
271  pId = this_thr->th.th_team->t.ompt_team_info.parallel_id;
272  tId = this_thr->th.th_current_task->ompt_task_info.task_id;
273  }
274  ompt_callbacks.ompt_callback(ompt_event_wait_barrier_end)(pId, tId);
275  }
276  }
277 #endif
278 
279  KMP_FSYNC_SPIN_ACQUIRED(spin);
280 }
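The loop above progresses through three phases: check the flag, spin (pause/yield), and finally suspend once the hibernate time is reached, re-checking for shutdown and pending tasks along the way. As a rough, self-contained illustration of that pause-then-yield-then-sleep progression, here is a simplified analogue in portable C++ using std::atomic and a condition variable; it is a sketch of the pattern only, not the runtime's actual suspend mechanism:

#include <atomic>
#include <chrono>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>

// Simplified analogue of __kmp_wait_template: spin, then yield, then sleep.
struct simple_flag {
    std::atomic<uint64_t> value{0};
    uint64_t checker;            // "done" when value == checker
    std::mutex mtx;              // protects the sleep phase
    std::condition_variable cv;  // used to suspend/resume the waiter

    explicit simple_flag(uint64_t c) : checker(c) {}

    bool done() const { return value.load(std::memory_order_acquire) == checker; }

    void wait(std::chrono::milliseconds blocktime) {
        const auto deadline = std::chrono::steady_clock::now() + blocktime;  // "hibernate"
        int spins = 0;
        while (!done()) {
            if (++spins < 4096)
                continue;                    // spin phase (cf. KMP_YIELD_SPIN)
            std::this_thread::yield();       // yield phase (cf. KMP_YIELD)
            if (std::chrono::steady_clock::now() < deadline)
                continue;                    // keep spinning until blocktime expires
            std::unique_lock<std::mutex> lk(mtx);  // sleep phase (cf. flag->suspend)
            cv.wait_for(lk, std::chrono::milliseconds(1), [&] { return done(); });
        }
    }
};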
281 
282 /* Release any threads waiting on the flag by updating the flag value and, if the sleep bit(s)
283  indicate a sleeping waiter, resuming it. Every call to __kmp_wait_template must be matched by a
284  call to this function from another thread to wake the potentially sleeping waiter and prevent deadlocks! */
285 template <class C>
286 static inline void
287 __kmp_release_template(C *flag)
288 {
289 #ifdef KMP_DEBUG
290  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
291 #endif
292  KF_TRACE(20, ("__kmp_release: T#%d releasing flag(%p)\n", gtid, flag->get()));
293  KMP_DEBUG_ASSERT(flag->get());
294  KMP_FSYNC_RELEASING(flag->get());
295 
296  flag->internal_release();
297 
298  KF_TRACE(100, ("__kmp_release: T#%d set new spin(%p)=%d\n", gtid, flag->get(), *(flag->get())));
299 
300  if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
301  // Only need to check sleep stuff if infinite block time not set
302  if (flag->is_any_sleeping()) { // Are *any* of the threads that wait on this flag sleeping?
303  for (unsigned int i=0; i<flag->get_num_waiters(); ++i) {
304  kmp_info_t * waiter = flag->get_waiter(i); // if a sleeping waiter exists at i, sets current_waiter to i inside the flag
305  if (waiter) {
306  int wait_gtid = waiter->th.th_info.ds.ds_gtid;
307  // Wake up thread if needed
308  KF_TRACE(50, ("__kmp_release: T#%d waking up thread T#%d since sleep flag(%p) set\n",
309  gtid, wait_gtid, flag->get()));
310  flag->resume(wait_gtid); // unsets flag's current_waiter when done
311  }
312  }
313  }
314  }
315 }
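The matching release side of the sketch above: update the flag value, then wake any waiter that may already have entered the sleep phase, mirroring the pairing requirement stated in the comment before __kmp_release_template (again an illustration, not the runtime's code):

// Continuing the simple_flag sketch: the releasing thread publishes the new
// value and notifies, so a waiter sleeping on the condition variable resumes.
void release(simple_flag &f) {
    f.value.store(f.checker, std::memory_order_release);  // cf. flag->internal_release()
    std::lock_guard<std::mutex> lk(f.mtx);
    f.cv.notify_all();                                     // cf. flag->resume(wait_gtid)
}

// Usage: one thread calls f.wait(std::chrono::milliseconds(200)); another thread
// later calls release(f). Because the waiter re-checks done() with a short timeout,
// it wakes promptly even if the notification races with it falling asleep.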
316 
317 template <typename FlagType>
318 struct flag_traits {};
319 
320 template <>
321 struct flag_traits<kmp_uint32> {
322  typedef kmp_uint32 flag_t;
323  static const flag_type t = flag32;
324  static inline flag_t tcr(flag_t f) { return TCR_4(f); }
325  static inline flag_t test_then_add4(volatile flag_t *f) { return KMP_TEST_THEN_ADD4_32((volatile kmp_int32 *)f); }
326  static inline flag_t test_then_or(volatile flag_t *f, flag_t v) { return KMP_TEST_THEN_OR32((volatile kmp_int32 *)f, v); }
327  static inline flag_t test_then_and(volatile flag_t *f, flag_t v) { return KMP_TEST_THEN_AND32((volatile kmp_int32 *)f, v); }
328 };
329 
330 template <>
331 struct flag_traits<kmp_uint64> {
332  typedef kmp_uint64 flag_t;
333  static const flag_type t = flag64;
334  static inline flag_t tcr(flag_t f) { return TCR_8(f); }
335  static inline flag_t test_then_add4(volatile flag_t *f) { return KMP_TEST_THEN_ADD4_64((volatile kmp_int64 *)f); }
336  static inline flag_t test_then_or(volatile flag_t *f, flag_t v) { return KMP_TEST_THEN_OR64((volatile kmp_int64 *)f, v); }
337  static inline flag_t test_then_and(volatile flag_t *f, flag_t v) { return KMP_TEST_THEN_AND64((volatile kmp_int64 *)f, v); }
338 };
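flag_traits exists so kmp_basic_flag can be written once for both widths: it maps the generic operations onto the width-specific runtime macros (TCR_4/TCR_8 and the KMP_TEST_THEN_ADD4/OR/AND 32/64 primitives). The same idea expressed with std::atomic, shown here as an illustration of the traits pattern rather than the actual macro definitions:

#include <atomic>
#include <cstdint>

// Width-agnostic atomic helpers in the style of flag_traits.
template <typename T>
struct atomic_traits {
    static T load(std::atomic<T> &f)               { return f.load(std::memory_order_acquire); }
    static T test_then_add4(std::atomic<T> &f)     { return f.fetch_add(static_cast<T>(4)); }
    static T test_then_or(std::atomic<T> &f, T v)  { return f.fetch_or(v); }
    static T test_then_and(std::atomic<T> &f, T v) { return f.fetch_and(v); }
};

// Works unchanged for either width:
//   std::atomic<uint32_t> f32{0}; atomic_traits<uint32_t>::test_then_or(f32, 1u);
//   std::atomic<uint64_t> f64{0}; atomic_traits<uint64_t>::test_then_add4(f64);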
339 
340 template <typename FlagType>
341 class kmp_basic_flag : public kmp_flag<FlagType> {
342  typedef flag_traits<FlagType> traits_type;
343  FlagType checker;
344  kmp_info_t * waiting_threads[1];
345  kmp_uint32 num_waiting_threads;
346  public:
347  kmp_basic_flag(volatile FlagType *p) : kmp_flag<FlagType>(p, traits_type::t), num_waiting_threads(0) {}
348  kmp_basic_flag(volatile FlagType *p, kmp_info_t *thr) : kmp_flag<FlagType>(p, traits_type::t), num_waiting_threads(1) {
349  waiting_threads[0] = thr;
350  }
351  kmp_basic_flag(volatile FlagType *p, FlagType c) : kmp_flag<FlagType>(p, traits_type::t), checker(c), num_waiting_threads(0) {}
356  kmp_info_t * get_waiter(kmp_uint32 i) {
357  KMP_DEBUG_ASSERT(i<num_waiting_threads);
358  return waiting_threads[i];
359  }
363  kmp_uint32 get_num_waiters() { return num_waiting_threads; }
369  void set_waiter(kmp_info_t *thr) {
370  waiting_threads[0] = thr;
371  num_waiting_threads = 1;
372  }
376  bool done_check() { return traits_type::tcr(*(this->get())) == checker; }
381  bool done_check_val(FlagType old_loc) { return old_loc == checker; }
389  bool notdone_check() { return traits_type::tcr(*(this->get())) != checker; }
394  void internal_release() {
395  (void) traits_type::test_then_add4((volatile FlagType *)this->get());
396  }
401  FlagType set_sleeping() {
402  return traits_type::test_then_or((volatile FlagType *)this->get(), KMP_BARRIER_SLEEP_STATE);
403  }
408  FlagType unset_sleeping() {
409  return traits_type::test_then_and((volatile FlagType *)this->get(), ~KMP_BARRIER_SLEEP_STATE);
410  }
415  bool is_sleeping_val(FlagType old_loc) { return old_loc & KMP_BARRIER_SLEEP_STATE; }
419  bool is_sleeping() { return is_sleeping_val(*(this->get())); }
420  bool is_any_sleeping() { return is_sleeping_val(*(this->get())); }
421  kmp_uint8 *get_stolen() { return NULL; }
422  enum barrier_type get_bt() { return bs_last_barrier; }
423 };
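set_sleeping(), unset_sleeping() and is_sleeping_val() reserve one bit of the flag word (KMP_BARRIER_SLEEP_STATE) as a "waiter is asleep" marker: it is ORed in before a waiter suspends and ANDed out on wakeup, while the remaining bits continue to carry the flag's value. A small self-contained illustration of that encoding (the bit chosen here is arbitrary, not the runtime's constant):

#include <atomic>
#include <cstdint>

// Illustrative sleep-bit encoding: one bit marks "a waiter is sleeping",
// the rest of the word still holds the flag's real value.
constexpr uint64_t SLEEP_BIT = 1ull << 63;  // arbitrary bit for this sketch

uint64_t set_sleeping(std::atomic<uint64_t> &flag)   { return flag.fetch_or(SLEEP_BIT); }
uint64_t unset_sleeping(std::atomic<uint64_t> &flag) { return flag.fetch_and(~SLEEP_BIT); }
bool     is_sleeping_val(uint64_t old_val)           { return (old_val & SLEEP_BIT) != 0; }

// A releaser that sees the sleep bit in the value returned by its own atomic
// update knows it must also wake the waiter (cf. __kmp_release_template).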
424 
425 class kmp_flag_32 : public kmp_basic_flag<kmp_uint32> {
426  public:
427  kmp_flag_32(volatile kmp_uint32 *p) : kmp_basic_flag<kmp_uint32>(p) {}
428  kmp_flag_32(volatile kmp_uint32 *p, kmp_info_t *thr) : kmp_basic_flag<kmp_uint32>(p, thr) {}
429  kmp_flag_32(volatile kmp_uint32 *p, kmp_uint32 c) : kmp_basic_flag<kmp_uint32>(p, c) {}
430  void suspend(int th_gtid) { __kmp_suspend_32(th_gtid, this); }
431  void resume(int th_gtid) { __kmp_resume_32(th_gtid, this); }
432  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin, int *thread_finished
433  USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained) {
434  return __kmp_execute_tasks_32(this_thr, gtid, this, final_spin, thread_finished
435  USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
436  }
437  void wait(kmp_info_t *this_thr, int final_spin
438  USE_ITT_BUILD_ARG(void * itt_sync_obj)) {
439  __kmp_wait_template(this_thr, this, final_spin
440  USE_ITT_BUILD_ARG(itt_sync_obj));
441  }
442  void release() { __kmp_release_template(this); }
443  flag_type get_ptr_type() { return flag32; }
444 };
445 
446 class kmp_flag_64 : public kmp_basic_flag<kmp_uint64> {
447  public:
448  kmp_flag_64(volatile kmp_uint64 *p) : kmp_basic_flag<kmp_uint64>(p) {}
449  kmp_flag_64(volatile kmp_uint64 *p, kmp_info_t *thr) : kmp_basic_flag<kmp_uint64>(p, thr) {}
450  kmp_flag_64(volatile kmp_uint64 *p, kmp_uint64 c) : kmp_basic_flag<kmp_uint64>(p, c) {}
451  void suspend(int th_gtid) { __kmp_suspend_64(th_gtid, this); }
452  void resume(int th_gtid) { __kmp_resume_64(th_gtid, this); }
453  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin, int *thread_finished
454  USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained) {
455  return __kmp_execute_tasks_64(this_thr, gtid, this, final_spin, thread_finished
456  USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
457  }
458  void wait(kmp_info_t *this_thr, int final_spin
459  USE_ITT_BUILD_ARG(void * itt_sync_obj)) {
460  __kmp_wait_template(this_thr, this, final_spin
461  USE_ITT_BUILD_ARG(itt_sync_obj));
462  }
463  void release() { __kmp_release_template(this); }
464  flag_type get_ptr_type() { return flag64; }
465 };
466 
467 // Hierarchical 64-bit on-core barrier instantiation
468 class kmp_flag_oncore : public kmp_flag<kmp_uint64> {
469  kmp_uint64 checker;
470  kmp_info_t * waiting_threads[1];
471  kmp_uint32 num_waiting_threads;
472  kmp_uint32 offset;
473  bool flag_switch;
474  enum barrier_type bt;
475  kmp_info_t * this_thr;
476 #if USE_ITT_BUILD
477  void *itt_sync_obj;
478 #endif
479  unsigned char& byteref(volatile kmp_uint64* loc, size_t offset) { return ((unsigned char *)loc)[offset]; }
480 public:
481  kmp_flag_oncore(volatile kmp_uint64 *p)
482  : kmp_flag<kmp_uint64>(p, flag_oncore), num_waiting_threads(0), flag_switch(false) {}
483  kmp_flag_oncore(volatile kmp_uint64 *p, kmp_uint32 idx)
484  : kmp_flag<kmp_uint64>(p, flag_oncore), num_waiting_threads(0), offset(idx), flag_switch(false) {}
485  kmp_flag_oncore(volatile kmp_uint64 *p, kmp_uint64 c, kmp_uint32 idx, enum barrier_type bar_t,
486  kmp_info_t * thr
487 #if USE_ITT_BUILD
488  , void *itt
489 #endif
490  )
491  : kmp_flag<kmp_uint64>(p, flag_oncore), checker(c), num_waiting_threads(0), offset(idx),
492  flag_switch(false), bt(bar_t), this_thr(thr)
493 #if USE_ITT_BUILD
494  , itt_sync_obj(itt)
495 #endif
496  {}
497  kmp_info_t * get_waiter(kmp_uint32 i) {
498  KMP_DEBUG_ASSERT(i<num_waiting_threads);
499  return waiting_threads[i];
500  }
501  kmp_uint32 get_num_waiters() { return num_waiting_threads; }
502  void set_waiter(kmp_info_t *thr) {
503  waiting_threads[0] = thr;
504  num_waiting_threads = 1;
505  }
506  bool done_check_val(kmp_uint64 old_loc) { return byteref(&old_loc,offset) == checker; }
507  bool done_check() { return done_check_val(*get()); }
508  bool notdone_check() {
509  // Calculate flag_switch
510  if (this_thr->th.th_bar[bt].bb.wait_flag == KMP_BARRIER_SWITCH_TO_OWN_FLAG)
511  flag_switch = true;
512  if (byteref(get(),offset) != 1 && !flag_switch)
513  return true;
514  else if (flag_switch) {
515  this_thr->th.th_bar[bt].bb.wait_flag = KMP_BARRIER_SWITCHING;
516  kmp_flag_64 flag(&this_thr->th.th_bar[bt].bb.b_go, (kmp_uint64)KMP_BARRIER_STATE_BUMP);
517  __kmp_wait_64(this_thr, &flag, TRUE
518 #if USE_ITT_BUILD
519  , itt_sync_obj
520 #endif
521  );
522  }
523  return false;
524  }
525  void internal_release() {
526  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
527  byteref(get(),offset) = 1;
528  }
529  else {
530  kmp_uint64 mask=0;
531  byteref(&mask,offset) = 1;
532  (void) KMP_TEST_THEN_OR64((volatile kmp_int64 *)get(), mask);
533  }
534  }
535  kmp_uint64 set_sleeping() {
536  return KMP_TEST_THEN_OR64((kmp_int64 volatile *)get(), KMP_BARRIER_SLEEP_STATE);
537  }
538  kmp_uint64 unset_sleeping() {
539  return KMP_TEST_THEN_AND64((kmp_int64 volatile *)get(), ~KMP_BARRIER_SLEEP_STATE);
540  }
541  bool is_sleeping_val(kmp_uint64 old_loc) { return old_loc & KMP_BARRIER_SLEEP_STATE; }
542  bool is_sleeping() { return is_sleeping_val(*get()); }
543  bool is_any_sleeping() { return is_sleeping_val(*get()); }
544  void wait(kmp_info_t *this_thr, int final_spin) {
545  __kmp_wait_template<kmp_flag_oncore>(this_thr, this, final_spin
546  USE_ITT_BUILD_ARG(itt_sync_obj));
547  }
548  void release() { __kmp_release_template(this); }
549  void suspend(int th_gtid) { __kmp_suspend_oncore(th_gtid, this); }
550  void resume(int th_gtid) { __kmp_resume_oncore(th_gtid, this); }
551  int execute_tasks(kmp_info_t *this_thr, kmp_int32 gtid, int final_spin, int *thread_finished
552  USE_ITT_BUILD_ARG(void * itt_sync_obj), kmp_int32 is_constrained) {
553  return __kmp_execute_tasks_oncore(this_thr, gtid, this, final_spin, thread_finished
554  USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
555  }
556  kmp_uint8 *get_stolen() { return NULL; }
557  enum barrier_type get_bt() { return bt; }
558  flag_type get_ptr_type() { return flag_oncore; }
559 };
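kmp_flag_oncore packs several per-thread "go" flags into one 64-bit word: byteref() addresses a single byte by offset, and internal_release() either stores that byte directly (infinite blocktime, so no waiter can be sleeping) or ORs in a one-byte mask atomically so the other bytes and any sleep bit are untouched. A self-contained sketch of the per-byte release and check (illustrative, not the runtime's code):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Set one byte within a shared 64-bit word: build a mask with that byte = 1
// and OR it in atomically, leaving the remaining bytes unchanged.
void release_byte(std::atomic<uint64_t> &word, size_t offset /* 0..7 */) {
    uint64_t mask = 0;
    reinterpret_cast<unsigned char *>(&mask)[offset] = 1;  // cf. byteref(&mask, offset) = 1
    word.fetch_or(mask);
}

// The waiter for slot `offset` checks whether its own byte has become nonzero.
bool byte_is_set(const std::atomic<uint64_t> &word, size_t offset) {
    uint64_t v = word.load(std::memory_order_acquire);
    return reinterpret_cast<unsigned char *>(&v)[offset] != 0;
}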
560 
561 
566 #endif // KMP_WAIT_RELEASE_H