18 #include "kmp_wait_release.h" 20 #include "ompt-specific.h" 34 #ifdef KMP_SUPPORT_GRAPH_OUTPUT 35 static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.task = NULL; // set to NULL initially; it will point to the right
  // task once dependences have been processed
  node->dn.successors = NULL;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}
static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
  if (n == 0) {
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}
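
// Every depnode carries a lock that serializes updates to its task pointer
// and successors list against concurrent registration of new dependences and
// against __kmp_release_deps when the task completes.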
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))

static void __kmp_depnode_list_free(kmp_info_t *thread, kmp_depnode_list *list);
enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };
static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  // fold two shifted copies of the address so that neighboring allocations
  // spread across buckets
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
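
// Note on layout: the dephash header and its bucket array come from a single
// allocation; the buckets start right behind the kmp_dephash_t header (h + 1).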
static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  kmp_int32 size =
      h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

#ifdef KMP_DEBUG
  h->nelements = 0;
  h->nconflicts = 0;
#endif
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}
void __kmp_dephash_free_entries(kmp_info_t *thread, kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket;
        __kmp_depnode_list_free(thread, entry->last_ins);
        __kmp_node_deref(thread, entry->last_out);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = 0;
    }
  }
}
void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}
static kmp_dephash_entry *
__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr) {
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create a new entry. This is only done by one thread, so no locking is
    // required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
#endif
  }
  return entry;
}
static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}
static void __kmp_depnode_list_free(kmp_info_t *thread,
                                    kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next;

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}
static inline void __kmp_track_dependence(kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task here: it is only filled in after the dependences
  // have been processed
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which task a
     blocks the execution of task b through the task_dependence callback */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data),
        &(task_sink->ompt_task_info.task_data));
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
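
// __kmp_process_deps is instantiated twice per task: filter=true walks the
// possibly aliased dep_list, in which __kmp_check_deps has merged duplicate
// addresses and voided the later occurrences by setting base_addr to 0;
// filter=false walks the noalias list, which is taken as-is.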
template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    KMP_DEBUG_ASSERT(dep->flags.in);

    if (filter && dep->base_addr == 0)
      continue; // skip entries voided by the duplicate filter

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;

    if (dep->flags.out && info->last_ins) {
      for (kmp_depnode_list_t *p = info->last_ins; p; p = p->next) {
        kmp_depnode_t *indep = p->node;
        if (indep->dn.task) {
          KMP_ACQUIRE_DEPNODE(gtid, indep);
          if (indep->dn.task) {
            __kmp_track_dependence(indep, node, task);
            indep->dn.successors =
                __kmp_add_node(thread, indep->dn.successors, node);
            KA_TRACE(40, ("__kmp_process_deps<%d>: T#%d adding dependence from "
                          "%p to %p\n",
                          filter, gtid, KMP_TASK_TO_TASKDATA(indep->dn.task),
                          KMP_TASK_TO_TASKDATA(task)));
            npredecessors++;
          }
          KMP_RELEASE_DEPNODE(gtid, indep);
        }
      }

      __kmp_depnode_list_free(thread, info->last_ins);
      info->last_ins = NULL;

    } else if (last_out && last_out->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, last_out);
      if (last_out->dn.task) {
        __kmp_track_dependence(last_out, node, task);
        last_out->dn.successors =
            __kmp_add_node(thread, last_out->dn.successors, node);
        KA_TRACE(
            40,
            ("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
             filter, gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task),
             KMP_TASK_TO_TASKDATA(task)));

        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, last_out);
    }

    if (dep_barrier) {
      // if this is a sync point in the serial sequence, the previous outputs
      // are guaranteed to be completed after the execution of this task, so
      // the previous output nodes can be cleared.
      __kmp_node_deref(thread, last_out);
      info->last_out = NULL;
    } else {
      if (dep->flags.out) {
        __kmp_node_deref(thread, last_out);
        info->last_out = __kmp_node_ref(node);
      } else
        info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    }
  }

  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));

  return npredecessors;
}
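
// dep_barrier selects the bookkeeping mode: NO_DEP_BARRIER (normal task
// creation, used by __kmpc_omp_task_with_deps) keeps chaining outputs, while
// DEP_BARRIER (used by __kmpc_omp_wait_deps) clears the last_out entries,
// because every prior output is guaranteed complete once the wait returns.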
#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t *hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i;

#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));
  // Filter deps in dep_list: fold duplicate addresses into their first
  // occurrence and void the later ones
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0)
      for (int j = i + 1; j < ndeps; j++)
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |= dep_list[j].flags.out;
          dep_list[j].base_addr = 0; // mark the j-th element as void
        }
  }

  // npredecessors is set to -1 so that none of the releasing tasks can queue
  // this task before we have finished processing all the dependences
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation at
  // the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // account for our initial fake value
  npredecessors++;

  // update the predecessor count and obtain the current value to check if
  // there are still any outstanding dependences (some tasks may have finished
  // while we processed the dependences)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0 ? true : false;
}
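
// Worked example of the accounting in __kmp_check_deps: if processing found 3
// predecessors, the local count is 3 + 1 (fake value) = 4. If 2 of those
// predecessors completed in the meantime, each decremented dn.npredecessors,
// which now holds -1 - 2 = -3. fetch_add(4) returns -3 and leaves
// dn.npredecessors at 1; the function reports -3 + 4 = 1 outstanding
// dependence, so the task must not be queued yet.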
void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  if (task->td_dephash) {
    KA_TRACE(
        40,
        ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
         gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependence is generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_deps or because deps are still
    // being processed
    if (npredecessors == 0) {
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        __kmp_omp_task(gtid, successor->dn.task, false);
      }
    }

    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame)
      current_task->ompt_task_info.frame.enter_frame =
          OMPT_GET_FRAME_ADDRESS(1);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(0);
  }
#if OMPT_OPTIONAL
  /* OMPT grabs all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 &&
      ompt_enabled.ompt_callback_task_dependences) {
    kmp_int32 i;

    new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
    new_taskdata->ompt_task_info.deps =
        (ompt_task_dependence_t *)KMP_OMPT_DEPS_ALLOC(
            thread, (ndeps + ndeps_noalias) * sizeof(ompt_task_dependence_t));

    KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

    for (i = 0; i < ndeps; i++) {
      new_taskdata->ompt_task_info.deps[i].variable_addr =
          (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_flags =
            ompt_task_dependence_type_inout;
      else if (dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_flags =
            ompt_task_dependence_type_out;
      else if (dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[i].dependence_flags =
            ompt_task_dependence_type_in;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      new_taskdata->ompt_task_info.deps[ndeps + i].variable_addr =
          (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_flags =
            ompt_task_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_flags =
            ompt_task_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_flags =
            ompt_task_dependence_type_in;
    }
    ompt_callbacks.ompt_callback(ompt_callback_task_dependences)(
        &(new_taskdata->ompt_task_info.task_data),
        new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
    /* we can now free the memory allocated for the dependences */
    KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
    new_taskdata->ompt_task_info.deps = NULL;
    new_taskdata->ompt_task_info.ndeps = 0;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */
  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
#if OMP_45_ENABLED
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
#endif
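
  // A task created inside a serialized team runs immediately, so its
  // dependences are trivially satisfied; the exception is a team where proxy
  // tasks have been found, since those may still be in flight.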
  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependences have been tracked yet, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = NULL;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmpc_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = NULL;
  }
#endif
  return ret;
}
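
// Entry point for waiting on a depend list without creating a task. Rather
// than idling, the calling thread executes other ready tasks (via
// flag.execute_tasks below) until all listed dependences are satisfied.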
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
  // We can return immediately as:
  // - dependences are not computed in serial teams (except with proxy tasks)
  // - if the dephash is not yet created, it means we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
#if OMP_45_ENABLED
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}