#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif
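// A depnode represents one task in the dependence graph. Initialization
// sets an empty successor list, clears the mutexinoutset lock slots, and
// stores an initial reference count of 1 (the creator's reference).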
static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
  // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
#if USE_ITT_BUILD && USE_ITT_NOTIFY
  __itt_sync_create(node, "OMP task dep node", NULL, 0);
#endif
}
static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}
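// The dependence hash table maps a dependence base address to the last
// tasks that referenced it. Initial sizes differ by task type (see
// __kmp_dephash_create); sizes[] lists the growth generations used by
// __kmp_dephash_extend.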
enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;
static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
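// Grow the hash table one generation (through the prime sizes[] above)
// once the caller has seen at least as many bucket conflicts as buckets.
// Growth stops after MAX_GEN generations; the table is then kept as-is
// and chains simply get longer.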
static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  size_t size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;
  h->last_all = current_dephash->last_all;

  // make sure buckets are properly initialized
  for (size_t i = 0; i < new_size; i++) {
    h->buckets[i] = NULL;
  }

  // insert existing elements into the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // rehash the entry with the new table size
      size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket) {
        h->nconflicts++;
      }
      h->buckets[new_bucket] = entry;
    }
  }

  // free the old hash table
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
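// Create a task's dependence hash table. Implicit (master) tasks get the
// larger KMP_DEPHASH_MASTER_SIZE table, presumably because most sibling
// tasks with dependences are created from the implicit task of the region.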
static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;
  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;
  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->last_all = NULL;
  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;
  return h;
}
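// Find-or-insert the entry for a dependence address. This may trigger
// __kmp_dephash_extend, which is why the caller passes kmp_dephash_t **:
// the table pointer itself can be replaced on return.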
static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread,
                                             kmp_dephash_t **hash,
                                             kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  size_t bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create a new entry; this is only done by one thread so no locking
    // is required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    if (!h->last_all) // no predecessor task with omp_all_memory dependence
      entry->last_out = NULL;
    else // link the omp_all_memory depnode to the new entry
      entry->last_out = __kmp_node_ref(h->last_all);
    entry->last_set = NULL;
    entry->prev_set = NULL;
    entry->last_flag = 0;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}
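// Push a node onto the front of a depnode list, taking a new reference on it.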
static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}
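// Reporting hook only: emits the edge for graph output and/or the OMPT
// task_dependence callback. It does not modify the dependence graph.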
static inline void __kmp_track_dependence(kmp_int32 gtid,
                                          kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task here: it is only filled in after the
  // dependences have already been processed
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which task a
     blocks the execution of task b through the task_dependence callback */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    ompt_data_t *sink_data;
    if (sink_task)
      sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data);
    else
      sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data;

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data), sink_data);
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
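// The two overloads below link "node"/"source" as a successor of earlier
// depnodes. A predecessor only needs the edge while its task is still
// around, so dn.task is checked once before and once after taking the
// depnode lock to avoid racing with the predecessor's completion path.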
static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as successor of list elements
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) {
        __kmp_track_dependence(gtid, dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}
static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) {
      __kmp_track_dependence(gtid, sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
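// omp_all_memory handling: the new task behaves as an OUT dependence on
// every address tracked so far, plus the dedicated last_all node that
// represents the previous omp_all_memory task, if any.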
static inline kmp_int32
__kmp_process_dep_all(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *h,
                      bool dep_barrier, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d processing dep_all, "
                "dep_barrier = %d\n",
                gtid, dep_barrier));
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;

  // process the previous omp_all_memory node if any
  npredecessors +=
      __kmp_depnode_link_successor(gtid, thread, task, node, h->last_all);
  __kmp_node_deref(thread, h->last_all);
  if (!dep_barrier) {
    h->last_all = __kmp_node_ref(node);
  } else {
    // if this is a sync point in the serial sequence, the previous outputs
    // are guaranteed to be completed after the execution of this task, so
    // the previous output nodes can be cleared
    h->last_all = NULL;
  }

  // process all regular dependences
  for (size_t i = 0; i < h->size; i++) {
    kmp_dephash_entry_t *info = h->buckets[i];
    if (!info) // skip empty slots in dephash
      continue;
    for (; info; info = info->next_in_bucket) {
      // for each entry, omp_all_memory works as an OUT dependence
      kmp_depnode_t *last_out = info->last_out;
      kmp_depnode_list_t *last_set = info->last_set;
      kmp_depnode_list_t *prev_set = info->prev_set;
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        info->last_out = NULL;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d found %d predecessors\n", gtid,
                npredecessors));
  return npredecessors;
}
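// Process one dependence list. The filter template parameter is true for
// the possibly-aliased dep_list, whose duplicate entries were voided
// (base_addr == 0) by __kmp_check_deps and must be skipped here.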
template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependences : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // skip filtered entries

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_set = info->last_set;
    kmp_depnode_list_t *prev_set = info->prev_set;

    if (dep->flags.out) { // out or inout --> clean lists if any
      if (last_set) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
        info->last_set = NULL;
        info->prev_set = NULL;
        info->last_flag = 0; // no sets in this dephash entry
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (!dep_barrier) {
        info->last_out = __kmp_node_ref(node);
      } else {
        // if this is a sync point in the serial sequence, the previous
        // outputs are guaranteed to be completed after the execution of
        // this task, so the previous output nodes can be cleared
        info->last_out = NULL;
      }
    } else { // either IN or MTX or SET
      if (info->last_flag == 0 || info->last_flag == dep->flag) {
        // last_set either didn't exist or is of the same dep kind
        // link node as successor of the last_out if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
        // link node as successor of all nodes in the prev_set if any
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, prev_set);
        if (dep_barrier) {
          // clean last_out and prev_set if any; don't touch last_set
          __kmp_node_deref(thread, last_out);
          info->last_out = NULL;
          __kmp_depnode_list_free(thread, prev_set);
          info->prev_set = NULL;
        }
      } else { // last_set is of a different dep kind, make it prev_set
        // link node as successor of all nodes in the last_set
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        // clean last_out if any
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        // clean prev_set if any
        __kmp_depnode_list_free(thread, prev_set);
        if (!dep_barrier) {
          // move last_set to prev_set; a new last_set will be allocated
          info->prev_set = last_set;
        } else {
          info->prev_set = NULL;
          info->last_flag = 0;
        }
        info->last_set = NULL;
      }
      // for dep_barrier, last_flag should remain 0 if last_set is empty,
      // unchanged otherwise
      if (!dep_barrier) {
        info->last_flag = dep->flag; // store dep kind of the last_set
        info->last_set = __kmp_add_node(thread, info->last_set, node);
      }
      // check if we are processing a MTX dependence
      if (dep->flag == KMP_DEP_MTX) {
        if (info->mtx_lock == NULL) {
          info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
          __kmp_init_lock(info->mtx_lock);
        }
        KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
        kmp_int32 m;
        // save the lock in the node's array
        for (m = 0; m < MAX_MTX_DEPS; ++m) {
          // sort pointers in decreasing order to avoid potential livelock
          if (node->dn.mtx_locks[m] < info->mtx_lock) {
            KMP_DEBUG_ASSERT(!node->dn.mtx_locks[node->dn.mtx_num_locks]);
            for (int n = node->dn.mtx_num_locks; n > m; --n) {
              // shift right all lesser non-NULL pointers
              KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
              node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
            }
            node->dn.mtx_locks[m] = info->mtx_lock;
            break;
          }
        }
        KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from the loop above
        node->dn.mtx_num_locks++;
      }
    }
  }
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}
#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)
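// __kmp_check_deps ties the pieces together: it collapses duplicate
// addresses in dep_list, downgrades surplus mutexinoutset entries to
// inout, detects omp_all_memory, then counts predecessors via
// __kmp_process_deps / __kmp_process_dep_all.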
// returns true if the task has any outstanding dependences
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0, dep_all = 0;
#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependences for task %p : %d "
                "possibly aliased dependences, %d non-aliased dependences : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // filter deps in dep_list
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0 &&
        dep_list[i].base_addr != (kmp_intptr_t)KMP_SIZE_T_MAX) {
      KMP_DEBUG_ASSERT(
          dep_list[i].flag == KMP_DEP_IN || dep_list[i].flag == KMP_DEP_OUT ||
          dep_list[i].flag == KMP_DEP_INOUT ||
          dep_list[i].flag == KMP_DEP_MTX || dep_list[i].flag == KMP_DEP_SET);
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          if (dep_list[i].flag != dep_list[j].flag) {
            // two different dependences on the same address behave like OUT
            dep_list[i].flag = KMP_DEP_OUT;
          }
          dep_list[j].base_addr = 0; // mark j element as void
        }
      }
      if (dep_list[i].flag == KMP_DEP_MTX) {
        // limit the number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          ++n_mtxs;
        } else {
          dep_list[i].flag = KMP_DEP_OUT; // downgrade mutexinoutset to inout
        }
      }
    } else if (dep_list[i].flag == KMP_DEP_ALL ||
               dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX) {
      // omp_all_memory dependence overrides all others if present
      dep_all = 1;
      break;
    }
  }

  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependences
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation
  int npredecessors;

  if (!dep_all) { // regular dependences
    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier,
                                             ndeps, dep_list, task);
    npredecessors += __kmp_process_deps<false>(
        gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
  } else { // omp_all_memory dependence
    npredecessors = __kmp_process_dep_all(gtid, node, *hash, dep_barrier, task);
  }

  node->dn.task = task;
  KMP_MB();

  // account for our initial fake value
  npredecessors++;

  // update predecessors and obtain the current value to check if there are
  // still any outstanding dependences (some tasks may have finished while
  // we processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a
  // releasing task
  return npredecessors > 0 ? true : false;
}
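/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param new_task task thunk allocated by __kmp_omp_task_alloc() for the new task
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

@return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not
suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued

Schedule a non-thread-switchable task with dependences for execution.

A compiler typically lowers, e.g.,
    #pragma omp task depend(inout : x)
to roughly the following (illustrative sketch only, not code emitted by
this file):
    kmp_depend_info_t dep;
    dep.base_addr = (kmp_intptr_t)&x;
    dep.len = sizeof(x);
    dep.flags.in = 1;
    dep.flags.out = 1;
    __kmpc_omp_task_with_deps(&loc, gtid, new_task, 1, &dep, 0, NULL);
*/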
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {
  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame),
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */
  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial =
      serial &&
      !(task_team && (task_team->tt.tt_found_proxy_tasks ||
                      task_team->tt.tt_hidden_helper_task_encountered));

  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependences have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependences: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependences "
                  "for task (serialized) loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependences : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
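// Helper for taskwait with dependences: signals to the tool that the
// hidden taskwait task completed, then resets the per-thread taskwait
// task data for reuse.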
#if OMPT_SUPPORT
void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task,
                                ompt_data_t *taskwait_task_data) {
  if (ompt_enabled.ompt_callback_task_schedule) {
    ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
        taskwait_task_data, ompt_taskwait_complete, NULL);
  }
  current_task->ompt_task_info.frame.enter_frame.ptr = NULL;
  *taskwait_task_data = ompt_data_none;
}
#endif /* OMPT_SUPPORT */
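/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

Blocks the current task until all specified dependences have been fulfilled.
*/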
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  __kmpc_omp_taskwait_deps_51(loc_ref, gtid, ndeps, dep_list, ndeps_noalias,
                              noalias_dep_list, false);
}
/* __kmpc_omp_taskwait_deps_51 : Function for the OpenMP 5.1 nowait clause.
 * Earlier code of __kmpc_omp_wait_deps() now lives in this function.
 */
void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid,
                                 kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                                 kmp_int32 ndeps_noalias,
                                 kmp_depend_info_t *noalias_dep_list,
                                 kmp_int32 has_no_wait) {
  KA_TRACE(10, ("__kmpc_omp_taskwait_deps(enter): T#%d loc=%p nowait#%d\n",
                gtid, loc_ref, has_no_wait));
  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no dependences to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
#if OMPT_SUPPORT
  // this function represents a taskwait construct with a depend clause;
  // tools are notified of:
  //  - creation of the taskwait task
  //  - dependences of the taskwait task
  //  - schedule and finish of the taskwait task
  ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data;
  KMP_ASSERT(taskwait_task_data->ptr == NULL);
  if (ompt_enabled.enabled) {
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          &(current_task->ompt_task_info.task_data),
          &(current_task->ompt_task_info.frame), taskwait_task_data,
          ompt_task_taskwait | ompt_task_undeferred | ompt_task_mergeable, 1,
          OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid));
    }
  }
#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    int ompt_ndeps = ndeps + ndeps_noalias;
    ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
        thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(ompt_deps != NULL);

    for (i = 0; i < ndeps; i++) {
      ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        ompt_deps[i].dependence_type = ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        ompt_deps[i].dependence_type = ompt_dependence_type_in;
      else if (dep_list[i].flags.mtx)
        ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset;
      else if (dep_list[i].flags.set)
        ompt_deps[i].dependence_type = ompt_dependence_type_inoutset;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in;
      else if (noalias_dep_list[i].flags.mtx)
        ompt_deps[ndeps + i].dependence_type =
            ompt_dependence_type_mutexinoutset;
      else if (noalias_dep_list[i].flags.set)
        ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        taskwait_task_data, ompt_deps, ompt_ndeps);
    /* We can now free the allocated memory for the dependences */
    KMP_OMPT_DEPS_FREE(thread, ompt_deps);
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */
  // we can now test whether there are any outstanding dependences to wait on
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore =
      ignore && thread->th.th_task_team != NULL &&
      thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE &&
      thread->th.th_task_team->tt.tt_hidden_helper_task_encountered == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no blocking "
                  "dependences : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no blocking "
                  "dependences : loc=%p\n",
                  gtid, loc_ref));
#if OMPT_SUPPORT
    __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
    return;
  }

  int thread_finished = FALSE;
  kmp_flag_32<false, false> flag(
      (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

#if OMPT_SUPPORT
  __ompt_taskwait_dep_finish(current_task, taskwait_task_data);
#endif /* OMPT_SUPPORT */
  KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d finished waiting : "
                "loc=%p\n",
                gtid, loc_ref));
}