LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
/*
 * kmp_taskdeps.cpp
 */

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

//#define KMP_SUPPORT_GRAPH_OUTPUT 1

#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#if OMP_40_ENABLED

// TODO: Improve memory allocation? keep a list of pre-allocated structures?
// allocate in blocks? re-use finished list entries?
// TODO: don't use atomic ref counters for stack-allocated nodes.
// TODO: find an alternative to atomic refs for heap-allocated nodes?
// TODO: Finish graph output support
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
// runtime locks
// TODO: Any ITT support needed?

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif

static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
                        // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}

enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
  // m_num_sets );
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
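
A quick way to see how this hash spreads dependence addresses is to feed it a few nearby objects and print the resulting buckets. The snippet below is a standalone illustration only, not part of the runtime: it re-derives the same shift/xor/modulus expression with the KMP_DEPHASH_OTHER_SIZE bucket count (97) hard-coded for the example.

#include <stdint.h>
#include <stdio.h>

// Illustrative copy of the bucket computation above (97 == KMP_DEPHASH_OTHER_SIZE).
static int bucket_of(intptr_t addr) {
  return (int)(((addr >> 6) ^ (addr >> 2)) % 97);
}

int main(void) {
  static int x[4]; // neighbouring 4-byte objects usually land in distinct buckets
  for (int i = 0; i < 4; ++i)
    printf("&x[%d] -> bucket %d\n", i, bucket_of((intptr_t)&x[i]));
  return 0;
}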

static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  kmp_int32 size =
      h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

#ifdef KMP_DEBUG
  h->nelements = 0;
  h->nconflicts = 0;
#endif
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}

#define ENTRY_LAST_INS 0
#define ENTRY_LAST_MTXS 1

static kmp_dephash_entry *
__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr) {
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
// create entry. This is only done by one thread so no locking required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->last_mtxs = NULL;
    entry->last_flag = ENTRY_LAST_INS;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
#endif
  }
  return entry;
}

static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}

static inline void __kmp_track_dependence(kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task as that is only filled after the dependencies
  // are already processed!
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between two tasks (a=source, b=sink) in which
     task a blocks the execution of b through the ompt_new_dependence_callback
     */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data),
        &(task_sink->ompt_task_info.task_data));
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}

static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}

static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}

template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_ins = info->last_ins;
    kmp_depnode_list_t *last_mtxs = info->last_mtxs;

    if (dep->flags.out) { // out --> clean lists of ins and mtxs if any
      if (last_ins || last_mtxs) {
        if (info->last_flag == ENTRY_LAST_INS) { // INS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        } else { // MTXS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        }
        __kmp_depnode_list_free(thread, last_ins);
        __kmp_depnode_list_free(thread, last_mtxs);
        info->last_ins = NULL;
        info->last_mtxs = NULL;
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (dep_barrier) {
        // if this is a sync point in the serial sequence, then the previous
        // outputs are guaranteed to be completed after the execution of this
        // task so the previous output nodes can be cleared.
        info->last_out = NULL;
      } else {
        info->last_out = __kmp_node_ref(node);
      }
    } else if (dep->flags.in) {
      // in --> link node to either last_out or last_mtxs, clean earlier deps
      if (last_mtxs) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
          // clean old INS before creating new list
          __kmp_depnode_list_free(thread, last_ins);
          info->last_ins = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_INS;
      info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    } else {
      KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
      // mtx --> link node to either last_out or last_ins, clean earlier deps
      if (last_ins) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
          // clean old MTXS before creating new list
          __kmp_depnode_list_free(thread, last_mtxs);
          info->last_mtxs = NULL;
        }
      } else {
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_MTXS;
      info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
      if (info->mtx_lock == NULL) {
        info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
        __kmp_init_lock(info->mtx_lock);
      }
      KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
      kmp_int32 m;
      // Save lock in node's array
      for (m = 0; m < MAX_MTX_DEPS; ++m) {
        // sort pointers in decreasing order to avoid potential livelock
        if (node->dn.mtx_locks[m] < info->mtx_lock) {
          KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
          for (int n = node->dn.mtx_num_locks; n > m; --n) {
            // shift right all lesser non-NULL pointers
            KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
            node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
          }
          node->dn.mtx_locks[m] = info->mtx_lock;
          break;
        }
      }
      KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop
      node->dn.mtx_num_locks++;
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}

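The three branches above encode the per-address ordering rules for depend clauses: an out/inout task waits for whichever readers (or mutexinoutset group) were recorded since the last output, or for the last output itself; an in task waits only for the last output or the last mutexinoutset group; and a mutexinoutset task additionally registers a per-address lock so such tasks exclude each other without being ordered. A user-level illustration of the resulting predecessor counts (the variable, task labels, and helper calls here are made up for the example):

int x;
#pragma omp task depend(out: x)   // T1: nothing recorded for x yet, 0 predecessors
{ produce(&x); }
#pragma omp task depend(in: x)    // T2: waits for T1 (the recorded last_out)
{ consume(x); }
#pragma omp task depend(in: x)    // T3: also waits only for T1; T2 and T3 may run concurrently
{ consume(x); }
#pragma omp task depend(inout: x) // T4: waits for T2 and T3 (the accumulated last_ins list)
{ update(&x); }
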
#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t *hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list
  // TODO: Different algorithm for large dep_list ( > 10 ? )
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |=
              (dep_list[j].flags.out ||
               (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
               (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              dep_list[i].flags.mtx | dep_list[j].flags.mtx &&
              !dep_list[i].flags.out;
          dep_list[j].base_addr = 0; // Mark j element as void
        }
      }
      if (dep_list[i].flags.mtx) {
        // limit number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flags.in = 1; // downgrade mutexinoutset to inout
          dep_list[i].flags.out = 1;
          dep_list[i].flags.mtx = 0;
        }
      }
    }
  }

  // doesn't need to be atomic as no other thread is going to be accessing this
  // node just yet.
  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependencies
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // Account for our initial fake value
  npredecessors++;

  // Update predecessors and obtain current value to check if there are still
  // any outstanding dependences (some tasks may have finished while we
  // processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0 ? true : false;
}

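Note that the filter loop at the top of this function folds duplicate addresses within one task's dependence list into a single entry before the graph is built, so each address is processed only once per task. A small, hedged illustration of the effect at the source level:

// Listing the same variable in several clauses on one task, e.g.
//   #pragma omp task depend(in: x) depend(out: x)
// is collapsed by the filter into a single entry with both the in and out
// flags set, i.e. the task behaves as if it had been written depend(inout: x).
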
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }

#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
    new_taskdata->ompt_task_info.deps =
        (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
            thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

    for (i = 0; i < ndeps; i++) {
      new_taskdata->ompt_task_info.deps[i].variable.ptr =
          (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_in;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      new_taskdata->ompt_task_info.deps[ndeps + i].variable.ptr =
          (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_in;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data),
        new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
    /* We can now free the allocated memory for the dependencies */
    /* For OMPD we might want to delay the free until task_end */
    KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
    new_taskdata->ompt_task_info.deps = NULL;
    new_taskdata->ompt_task_info.ndeps = 0;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
#if OMP_45_ENABLED
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
#endif

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependencies have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}

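For context, __kmpc_omp_task_with_deps is the entry point a compiler emits for a deferred task that carries depend clauses, after first allocating the task with __kmpc_omp_task_alloc. The sketch below only illustrates the shape of such generated code: the outlined entry point, the tied-task flag value, the task allocation sizes, and the exact flag bits per dependence type are assumptions for the example, not taken verbatim from any particular front end.

// Hypothetical lowering of:
//   #pragma omp task depend(in : a) depend(inout : b)
//   { work(a, &b); }
kmp_int32 outlined_task_entry(kmp_int32 gtid, void *task); // assumed outlined body

static void spawn_task_with_deps(ident_t *loc, kmp_int32 gtid, int *a, int *b) {
  kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, /*flags: tied (assumed)*/ 1,
                                        sizeof(kmp_task_t), /*sizeof_shareds*/ 0,
                                        outlined_task_entry);
  kmp_depend_info_t deps[2];
  deps[0].base_addr = (kmp_intptr_t)a; // depend(in : a)
  deps[0].len = sizeof(*a);
  deps[0].flags.in = 1;
  deps[0].flags.out = 0;
  deps[0].flags.mtx = 0;
  deps[1].base_addr = (kmp_intptr_t)b; // depend(inout : b)
  deps[1].len = sizeof(*b);
  deps[1].flags.in = 1;
  deps[1].flags.out = 1;
  deps[1].flags.mtx = 0;
  __kmpc_omp_task_with_deps(loc, gtid, t, 2, deps, 0, NULL);
}

A real front end also packs shared-variable pointers into the task and sizes the allocation accordingly; the point here is only the dependence array handed to this function.
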
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

  // We can return immediately as:
  // - dependences are not computed in serial teams (except with proxy tasks)
  // - if the dephash is not yet created it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
#if OMP_45_ENABLED
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}

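This entry point covers the cases where the encountering thread itself must wait on the dependences instead of deferring them to a queued task, for example an undeferred task (if(0) or a final context) that carries depend clauses. A hedged sketch of such a call site follows; the dependence setup mirrors the sketch after __kmpc_omp_task_with_deps, and the begin_if0/complete_if0 pairing is mentioned only as the usual companion calls, not exercised here.

// Hypothetical lowering of:
//   #pragma omp task if(0) depend(in : x)
//   { use(x); }
static void run_undeferred_task_with_dep(ident_t *loc, kmp_int32 gtid, int *x) {
  kmp_depend_info_t dep;
  dep.base_addr = (kmp_intptr_t)x;
  dep.len = sizeof(*x);
  dep.flags.in = 1;
  dep.flags.out = 0;
  dep.flags.mtx = 0;
  // Block until every predecessor recorded for &x has completed...
  __kmpc_omp_wait_deps(loc, gtid, 1, &dep, 0, NULL);
  // ...then the task body runs inline on the encountering thread (typically
  // bracketed by __kmpc_omp_task_begin_if0 / __kmpc_omp_task_complete_if0).
}
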
#endif /* OMP_40_ENABLED */