#include "kmp.h"
#include "kmp_io.h"
#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif
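// Initialize a fresh dependence node. The creator holds the first reference;
// dn.task stays NULL until all of the task's dependences have been processed.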
static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.task = NULL; // set to NULL; will point to the right task
                        // once the dependences have been processed
  node->dn.successors = NULL;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs,
                    1); // init creates the first reference to the node
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}
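// Claim an extra reference on a dependence node and return it.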
static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}
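// Hash table sizes are primes to spread entries across buckets; the implicit
// (master) task gets a much larger table since it typically records far more
// dependences than a nested explicit task.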
enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };
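// Hash an address into a bucket index. The low (alignment) bits are shifted
// away so that nearby aligned addresses do not all land in adjacent buckets.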
static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
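// Create the dependence hash table used by current_task to remember, per
// address, the last writer (last_out) and the readers since that writer
// (last_ins).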
static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;
  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  kmp_int32 size =
      h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;
#ifdef KMP_DEBUG
  h->nelements = 0;
  h->nconflicts = 0;
#endif
  h->buckets = (kmp_dephash_entry **)(h + 1); // buckets follow the header
  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;
  return h;
}
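// Find the hash entry for an address, creating and linking a fresh entry into
// its bucket on first use. Only the encountering thread mutates its own
// dephash, so no locking is needed here.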
static kmp_dephash_entry *
__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr) {
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
#endif
  }
  return entry;
}
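// Push a node onto the front of a dependence-node list, taking a new
// reference on the node for the list.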
static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}
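// Record a newly discovered edge source -> sink of the task dependence graph,
// for the debug graph output and/or an OMPT tool.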
static inline void __kmp_track_dependence(kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task here: it is only filled in after the
  // dependences have been processed
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  // OMPT tracks dependences between tasks (a=source, b=sink) in which task a
  // blocks the execution of b through the ompt_callback_task_dependence
  // callback
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data),
        &(task_sink->ompt_task_info.task_data));
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
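// Walk one dependence list and link this task's node behind the relevant
// predecessors recorded in the hash table:
//  - an out/inout dep must wait for all readers since the last writer
//    (last_ins), or otherwise for the last writer (last_out);
//  - an in dep only waits for the last writer.
// Returns the number of predecessors found. Instantiated with filter=true for
// the possibly-aliased list (entries voided by the duplicate filter are
// skipped) and filter=false for the no-alias list.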
template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    KMP_DEBUG_ASSERT(dep->flags.in);

    if (filter && dep->base_addr == 0)
      continue; // skip entries voided by the duplicate filter

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;

    if (dep->flags.out && info->last_ins) {
      // an out dependence after a set of in dependences: wait for every
      // reader recorded in last_ins
      for (kmp_depnode_list_t *p = info->last_ins; p; p = p->next) {
        kmp_depnode_t *indep = p->node;
        if (indep->dn.task) {
          KMP_ACQUIRE_DEPNODE(gtid, indep);
          if (indep->dn.task) {
            __kmp_track_dependence(indep, node, task);
            indep->dn.successors =
                __kmp_add_node(thread, indep->dn.successors, node);
            KA_TRACE(40, ("__kmp_process_deps<%d>: T#%d adding dependence from "
                          "%p to %p\n",
                          filter, gtid, KMP_TASK_TO_TASKDATA(indep->dn.task),
                          KMP_TASK_TO_TASKDATA(task)));
            npredecessors++;
          }
          KMP_RELEASE_DEPNODE(gtid, indep);
        }
      }

      __kmp_depnode_list_free(thread, info->last_ins);
      info->last_ins = NULL;

    } else if (last_out && last_out->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, last_out);
      if (last_out->dn.task) {
        __kmp_track_dependence(last_out, node, task);
        last_out->dn.successors =
            __kmp_add_node(thread, last_out->dn.successors, node);
        KA_TRACE(
            40,
            ("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
             filter, gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task),
             KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, last_out);
    }

    if (dep_barrier) {
      // if this is a sync point in the serial sequence, the previous outputs
      // are guaranteed to be completed after the execution of this task, so
      // the previous output nodes can be cleared
      __kmp_node_deref(thread, last_out);
      info->last_out = NULL;
    } else {
      if (dep->flags.out) {
        __kmp_node_deref(thread, last_out);
        info->last_out = __kmp_node_ref(node);
      } else
        info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    }
  }

  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));

  return npredecessors;
}
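// __kmp_check_deps: process both dependence lists for a task and atomically
// publish the final predecessor count. Returns true if the task still has
// unfinished predecessors and therefore must not be queued yet.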
#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependences
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t *hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i;

#if KMP_DEBUG
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
#endif
  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));
  // Filter duplicated addresses in dep_list: merge the in/out flags into the
  // first occurrence and void the later ones
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0)
      for (int j = i + 1; j < ndeps; j++)
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |= dep_list[j].flags.out;
          dep_list[j].base_addr = 0; // mark the j element as void
        }
  }
  // doesn't need to be atomic as no other thread is accessing this node yet;
  // set to -1 so that none of the releasing tasks can queue this task before
  // we have finished processing all the dependences
  node->dn.npredecessors = -1;
  // used to pack all npredecessors additions into a single atomic operation
  // at the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // account for our initial fake value
  npredecessors++;

  // update predecessors and obtain the current value to check if there are
  // still any outstanding dependences (some tasks may have finished while we
  // were processing)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;
  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p\n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task
  return npredecessors > 0;
}
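/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param new_task task thunk allocated by __kmp_omp_task_alloc() for the "new task"
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

@return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not
suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued

Schedule a non-thread-switchable task with dependences for execution
*/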
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {
  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame)
      current_task->ompt_task_info.frame.enter_frame =
          OMPT_GET_FRAME_ADDRESS(1);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(0);
  }
#if OMPT_OPTIONAL
  /* OMPT grab all dependences if requested by the tool */
  if (ndeps + ndeps_noalias > 0 &&
      ompt_enabled.ompt_callback_task_dependences) {
    kmp_int32 i;

    new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
    new_taskdata->ompt_task_info.deps =
        (ompt_task_dependence_t *)KMP_OMPT_DEPS_ALLOC(
            thread, (ndeps + ndeps_noalias) * sizeof(ompt_task_dependence_t));

    KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

    for (i = 0; i < ndeps; i++) {
      new_taskdata->ompt_task_info.deps[i].variable_addr =
          (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_task_dependence_type_inout;
      else if (dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_task_dependence_type_out;
      else if (dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_task_dependence_type_in;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      new_taskdata->ompt_task_info.deps[ndeps + i].variable_addr =
          (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_task_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_task_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_task_dependence_type_in;
    }
    ompt_callbacks.ompt_callback(ompt_callback_task_dependences)(
        &(new_taskdata->ompt_task_info.task_data),
        new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
    /* we can now free the allocated memory for the dependences */
    /* for OMPD we might want to delay the free until task_end */
    KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
    new_taskdata->ompt_task_info.deps = NULL;
    new_taskdata->ompt_task_info.ndeps = 0;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */
  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
#if OMP_45_ENABLED
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
#endif
  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    /* if no dependences have yet been tracked, create the dependence hash */
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = NULL;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) "
                  "loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmpc_omp_task\n",
                gtid, loc_ref, new_taskdata));
  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = NULL;
  }
#endif
  return ret;
}
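/*!
@ingroup TASKING
@param loc_ref location of the original task directive
@param gtid Global Thread ID of encountering thread
@param ndeps Number of depend items with possible aliasing
@param dep_list List of depend items with possible aliasing
@param ndeps_noalias Number of depend items with no aliasing
@param noalias_dep_list List of depend items with no aliasing

Blocks the current task until all specified dependences have been fulfilled.
*/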
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));
  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
  // We can return immediately as:
  // - dependences are not computed in serial teams (except with proxy tasks)
  // - if the dephash is not yet created, we have nothing to wait for
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
#if OMP_45_ENABLED
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
  ignore = ignore || current_task->td_dephash == NULL;
  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  kmp_depnode_t node = {0};
  __kmp_init_node(&node);
  if (!__kmp_check_deps(gtid, &node, NULL, current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    // help execute other tasks until all our predecessors have completed
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }
  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}