20 #include "kmp_wait_release.h" 31 #ifdef KMP_SUPPORT_GRAPH_OUTPUT 32 static kmp_int32 kmp_node_id_seed = 0;
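/* Task dependence support for the "depend" clause: each task with dependences
   owns a kmp_depnode_t recording its predecessor/successor relations, and each
   parent task keeps a hash that maps a dependence address to the last writer
   and the list of last readers. */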
static void
__kmp_init_node ( kmp_depnode_t *node )
{
    node->dn.task = NULL; // set once all dependences have been processed
    node->dn.successors = NULL;
    __kmp_init_lock(&node->dn.lock);
    node->dn.nrefs = 1; // creating the node establishes the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    node->dn.id = KMP_TEST_THEN_INC32(&kmp_node_id_seed);
#endif
}
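// Reference counting for dependence nodes: __kmp_node_ref takes an extra
// reference, __kmp_node_deref drops one and frees the node once the count
// reaches zero.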
static inline kmp_depnode_t *
__kmp_node_ref ( kmp_depnode_t *node )
{
    KMP_TEST_THEN_INC32(&node->dn.nrefs);
    return node;
}
static inline void
__kmp_node_deref ( kmp_info_t *thread, kmp_depnode_t *node )
{
    if ( !node ) return;

    kmp_int32 n = KMP_TEST_THEN_DEC32(&node->dn.nrefs) - 1;
    if ( n == 0 ) {
        KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,node);
#else
        __kmp_thread_free(thread,node);
#endif
    }
}
#define KMP_ACQUIRE_DEPNODE(gtid,n) __kmp_acquire_lock(&(n)->dn.lock,(gtid))
#define KMP_RELEASE_DEPNODE(gtid,n) __kmp_release_lock(&(n)->dn.lock,(gtid))

static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list );
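// Hash-table sizes: the implicit (master) task gets a larger table, presumably
// because it tends to accumulate far more dependences over its lifetime than an
// explicit task does.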
enum {
    KMP_DEPHASH_OTHER_SIZE = 97,
    KMP_DEPHASH_MASTER_SIZE = 997
};
static inline kmp_int32
__kmp_dephash_hash ( kmp_intptr_t addr, size_t hsize )
{
    return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
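// Create the dependence hash for current_task. The bucket array lives in the
// same allocation, immediately after the kmp_dephash_t header.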
static kmp_dephash_t *
__kmp_dephash_create ( kmp_info_t *thread, kmp_taskdata_t *current_task )
{
    kmp_dephash_t *h;
    size_t h_size;

    if ( current_task->td_flags.tasktype == TASK_IMPLICIT )
        h_size = KMP_DEPHASH_MASTER_SIZE;
    else
        h_size = KMP_DEPHASH_OTHER_SIZE;

    kmp_int32 size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
    h = (kmp_dephash_t *) __kmp_fast_allocate( thread, size );
#else
    h = (kmp_dephash_t *) __kmp_thread_malloc( thread, size );
#endif
    h->size = h_size;

#ifdef KMP_DEBUG
    h->nelements = 0;
    h->nconflicts = 0;
#endif
    h->buckets = (kmp_dephash_entry **)(h+1);

    for ( size_t i = 0; i < h_size; i++ )
        h->buckets[i] = 0;

    return h;
}
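// Tearing the hash down releases every entry's reader list and last-writer
// reference before the entry itself, and finally the table allocation.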
static void
__kmp_dephash_free_entries(kmp_info_t *thread, kmp_dephash_t *h)
{
    for ( size_t i = 0; i < h->size; i++ ) {
        kmp_dephash_entry_t *next;
        for ( kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next ) {
            next = entry->next_in_bucket;
            __kmp_depnode_list_free(thread,entry->last_ins);
            __kmp_node_deref(thread,entry->last_out);
#if USE_FAST_MEMORY
            __kmp_fast_free(thread,entry);
#else
            __kmp_thread_free(thread,entry);
#endif
        }
        h->buckets[i] = 0;
    }
}
static void
__kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h)
{
    __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread,h);
#else
    __kmp_thread_free(thread,h);
#endif
}
static kmp_dephash_entry *
__kmp_dephash_find ( kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr )
{
    kmp_int32 bucket = __kmp_dephash_hash(addr,h->size);

    kmp_dephash_entry_t *entry;
    for ( entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket )
        if ( entry->addr == addr ) break;

    if ( entry == NULL ) {
        // create a new entry; entries for a task are only created by one thread, so no locking is needed
#if USE_FAST_MEMORY
        entry = (kmp_dephash_entry_t *) __kmp_fast_allocate( thread, sizeof(kmp_dephash_entry_t) );
#else
        entry = (kmp_dephash_entry_t *) __kmp_thread_malloc( thread, sizeof(kmp_dephash_entry_t) );
#endif
        entry->addr = addr;
        entry->last_out = NULL;
        entry->last_ins = NULL;
        entry->next_in_bucket = h->buckets[bucket];
        h->buckets[bucket] = entry;
#ifdef KMP_DEBUG
        h->nelements++;
        if ( entry->next_in_bucket ) h->nconflicts++;
#endif
    }
    return entry;
}
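// Successor and last-reader lists are plain singly linked lists; __kmp_add_node
// pushes a new head that takes a reference on the given dependence node.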
static kmp_depnode_list_t *
__kmp_add_node ( kmp_info_t *thread, kmp_depnode_list_t *list, kmp_depnode_t *node )
{
    kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
    new_head = (kmp_depnode_list_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_list_t));
#else
    new_head = (kmp_depnode_list_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_list_t));
#endif

    new_head->node = __kmp_node_ref(node);
    new_head->next = list;

    return new_head;
}
static void
__kmp_depnode_list_free ( kmp_info_t *thread, kmp_depnode_list *list )
{
    kmp_depnode_list *next;

    for ( ; list ; list = next ) {
        next = list->next;

        __kmp_node_deref(thread,list->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,list);
#else
        __kmp_thread_free(thread,list);
#endif
    }
}
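// __kmp_track_dependence only reports a dependence edge (graph output and/or
// the OMPT task-dependence-pair callback); the actual bookkeeping is done by
// __kmp_process_deps below.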
static inline void
__kmp_track_dependence ( kmp_depnode_t *source, kmp_depnode_t *sink,
                         kmp_task_t *sink_task )
{
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
    kmp_taskdata_t * task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t * task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id, task_source->td_ident->psource,
                 sink->dn.id, task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_TRACE
    // OMPT tracks dependence pairs in which the source task blocks the execution of the sink
    if (ompt_enabled &&
        ompt_callbacks.ompt_callback(ompt_event_task_dependence_pair))
    {
        kmp_taskdata_t * task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
        kmp_taskdata_t * task_sink = KMP_TASK_TO_TASKDATA(sink_task);

        ompt_callbacks.ompt_callback(ompt_event_task_dependence_pair)(
            task_source->ompt_task_info.task_id,
            task_sink->ompt_task_info.task_id);
    }
#endif /* OMPT_SUPPORT && OMPT_TRACE */
}
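// Core dependence processing, instantiated twice: filter == true for the
// possibly aliased dep_list (entries voided by the duplicate filter have
// base_addr == 0 and are skipped), filter == false for the no-alias list.
// dep_barrier == true means the caller is a synchronization point rather than
// a new task. Returns the number of predecessors registered for "node".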
template< bool filter >
static inline kmp_int32
__kmp_process_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
                     bool dep_barrier, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                     kmp_task_t *task )
{
    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : dep_barrier = %d\n",
                  filter, gtid, ndeps, dep_barrier ) );
    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_int32 npredecessors = 0;
    for ( kmp_int32 i = 0; i < ndeps ; i++ ) {
        const kmp_depend_info_t * dep = &dep_list[i];

        KMP_DEBUG_ASSERT(dep->flags.in);

        if ( filter && dep->base_addr == 0 ) continue; // skip filtered entries

        kmp_dephash_entry_t *info = __kmp_dephash_find(thread,hash,dep->base_addr);
        kmp_depnode_t *last_out = info->last_out;
        if ( dep->flags.out && info->last_ins ) {
            for ( kmp_depnode_list_t * p = info->last_ins; p; p = p->next ) {
                kmp_depnode_t * indep = p->node;
                if ( indep->dn.task ) {
                    KMP_ACQUIRE_DEPNODE(gtid,indep);
                    if ( indep->dn.task ) {
                        __kmp_track_dependence(indep,node,task);
                        indep->dn.successors = __kmp_add_node(thread, indep->dn.successors, node);
                        KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                                     filter,gtid, KMP_TASK_TO_TASKDATA(indep->dn.task), KMP_TASK_TO_TASKDATA(task)));
                        npredecessors++;
                    }
                    KMP_RELEASE_DEPNODE(gtid,indep);
                }
            }

            __kmp_depnode_list_free(thread,info->last_ins);
            info->last_ins = NULL;

        } else if ( last_out && last_out->dn.task ) {
            KMP_ACQUIRE_DEPNODE(gtid,last_out);
            if ( last_out->dn.task ) {
                __kmp_track_dependence(last_out,node,task);
                last_out->dn.successors = __kmp_add_node(thread, last_out->dn.successors, node);
                KA_TRACE(40,("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
                             filter,gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task), KMP_TASK_TO_TASKDATA(task)));
                npredecessors++;
            }
            KMP_RELEASE_DEPNODE(gtid,last_out);
        }
        if ( dep_barrier ) {
            // at a barrier-type sync point the previous outputs are guaranteed to have
            // completed before this point, so the old output node can be dropped
            __kmp_node_deref(thread,last_out);
            info->last_out = NULL;
        } else {
            if ( dep->flags.out ) {
                __kmp_node_deref(thread,last_out);
                info->last_out = __kmp_node_ref(node);
            } else
                info->last_ins = __kmp_add_node(thread, info->last_ins, node);
        }
    }
    KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter, gtid, npredecessors ) );

    return npredecessors;
}
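// __kmp_check_deps filters duplicate addresses out of dep_list, runs both
// dependence lists through __kmp_process_deps, and returns true if the task
// still has unfinished predecessors (i.e. it cannot be scheduled right away).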
#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)

// returns true if the task has any outstanding dependence
static bool
__kmp_check_deps ( kmp_int32 gtid, kmp_depnode_t *node, kmp_task_t *task, kmp_dephash_t *hash,
                   bool dep_barrier,
                   kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                   kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    int i;
    kmp_taskdata_t * taskdata = KMP_TASK_TO_TASKDATA(task);
    KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d possibly aliased dependencies, "
                  "%d non-aliased dependencies : dep_barrier=%d .\n", gtid, taskdata, ndeps, ndeps_noalias, dep_barrier ) );
    // filter duplicated addresses in dep_list: merge the flags into the first
    // occurrence and void the later ones
    for ( i = 0; i < ndeps; i ++ ) {
        if ( dep_list[i].base_addr != 0 )
            for ( int j = i+1; j < ndeps; j++ )
                if ( dep_list[i].base_addr == dep_list[j].base_addr ) {
                    dep_list[i].flags.in |= dep_list[j].flags.in;
                    dep_list[i].flags.out |= dep_list[j].flags.out;
                    dep_list[j].base_addr = 0; // mark the j-th element as void
                }
    }
    // -1 ensures that none of the releasing tasks can queue this task before
    // we have finished processing all of its dependencies
    node->dn.npredecessors = -1;

    // used to pack all npredecessors additions into a single atomic operation at the end
    int npredecessors;

    npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier,
                                             ndeps, dep_list, task);
    npredecessors += __kmp_process_deps<false>(gtid, node, hash, dep_barrier,
                                               ndeps_noalias, noalias_dep_list, task);

    node->dn.task = task;
    KMP_MB();

    npredecessors++; // account for the initial fake value of -1

    // publish the additions and read back the current value to see whether any
    // predecessors are still outstanding
    npredecessors = KMP_TEST_THEN_ADD32(&node->dn.npredecessors, npredecessors) + npredecessors;
    KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                  gtid, npredecessors, taskdata ) );

    // beyond this point the task could already be queued (and executed) by a releasing task
    return npredecessors > 0 ? true : false;
}
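// Called when a task with dependences completes: free its dependence hash,
// decrement the predecessor count of every successor, and schedule the ones
// that become ready.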
void
__kmp_release_deps ( kmp_int32 gtid, kmp_taskdata_t *task )
{
    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_depnode_t *node = task->td_depnode;

    if ( task->td_dephash ) {
        KA_TRACE(40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n", gtid, task ) );
        __kmp_dephash_free(thread,task->td_dephash);
        task->td_dephash = NULL;
    }

    if ( !node ) return;

    KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n", gtid, task ) );

    KMP_ACQUIRE_DEPNODE(gtid,node);
    node->dn.task = NULL; // mark this task as finished, so no new dependences are added to it
    KMP_RELEASE_DEPNODE(gtid,node);
    kmp_depnode_list_t *next;
    for ( kmp_depnode_list_t *p = node->dn.successors; p; p = next ) {
        kmp_depnode_t *successor = p->node;
        kmp_int32 npredecessors = KMP_TEST_THEN_DEC32(&successor->dn.npredecessors) - 1;

        // the successor task can be NULL for wait_deps or because dependences are still being processed
        if ( npredecessors == 0 ) {
            KMP_MB();
            if ( successor->dn.task ) {
                KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled for execution.\n",
                              gtid, successor->dn.task, task ) );
                __kmp_omp_task(gtid,successor->dn.task,false);
            }
        }

        next = p->next;
        __kmp_node_deref(thread,p->node);
#if USE_FAST_MEMORY
        __kmp_fast_free(thread,p);
#else
        __kmp_thread_free(thread,p);
#endif
    }
    __kmp_node_deref(thread,node);

    KA_TRACE(20, ("__kmp_release_deps: T#%d all successors of %p notified of completion\n", gtid, task ) );
}
kmp_int32
__kmpc_omp_task_with_deps( ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *new_task,
                           kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                           kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    kmp_taskdata_t * new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata ) );

    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;
#if OMPT_SUPPORT && OMPT_TRACE
    /* OMPT: grab all dependences if requested by the tool */
    if (ompt_enabled && ndeps+ndeps_noalias > 0 &&
        ompt_callbacks.ompt_callback(ompt_event_task_dependences))
    {
        kmp_int32 i;

        new_taskdata->ompt_task_info.ndeps = ndeps+ndeps_noalias;
        new_taskdata->ompt_task_info.deps = (ompt_task_dependence_t *)
            KMP_OMPT_DEPS_ALLOC(thread,
                                (ndeps+ndeps_noalias)*sizeof(ompt_task_dependence_t));

        KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);
        for (i = 0; i < ndeps; i++)
        {
            new_taskdata->ompt_task_info.deps[i].variable_addr =
                (void*) dep_list[i].base_addr;
            if (dep_list[i].flags.in && dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[i].dependence_flags =
                    ompt_task_dependence_type_inout;
            else if (dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[i].dependence_flags =
                    ompt_task_dependence_type_out;
            else if (dep_list[i].flags.in)
                new_taskdata->ompt_task_info.deps[i].dependence_flags =
                    ompt_task_dependence_type_in;
        }
        for (i = 0; i < ndeps_noalias; i++)
        {
            new_taskdata->ompt_task_info.deps[ndeps+i].variable_addr =
                (void*) noalias_dep_list[i].base_addr;
            if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[ndeps+i].dependence_flags =
                    ompt_task_dependence_type_inout;
            else if (noalias_dep_list[i].flags.out)
                new_taskdata->ompt_task_info.deps[ndeps+i].dependence_flags =
                    ompt_task_dependence_type_out;
            else if (noalias_dep_list[i].flags.in)
                new_taskdata->ompt_task_info.deps[ndeps+i].dependence_flags =
                    ompt_task_dependence_type_in;
        }
    }
#endif /* OMPT_SUPPORT && OMPT_TRACE */
    bool serial = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
#if OMP_41_ENABLED
    kmp_task_team_t * task_team = thread->th.th_task_team;
    serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
#endif
    if ( !serial && ( ndeps > 0 || ndeps_noalias > 0 )) {
        /* if no dependences have been tracked yet, create the dependence hash */
        if ( current_task->td_dephash == NULL )
            current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_fast_allocate(thread,sizeof(kmp_depnode_t));
#else
        kmp_depnode_t *node = (kmp_depnode_t *) __kmp_thread_malloc(thread,sizeof(kmp_depnode_t));
#endif

        __kmp_init_node(node);
        new_taskdata->td_depnode = node;

        if ( __kmp_check_deps( gtid, node, new_task, current_task->td_dephash, NO_DEP_BARRIER,
                               ndeps, dep_list, ndeps_noalias, noalias_dep_list ) ) {
            KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking dependencies: "
                          "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n", gtid, loc_ref,
                          new_taskdata ) );
            return TASK_CURRENT_NOT_QUEUED;
        }
    } else {
        KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies for task (serialized) "
                      "loc=%p task=%p\n", gtid, loc_ref, new_taskdata ) );
    }
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking dependencies : "
                  "loc=%p task=%p, transferring to __kmpc_omp_task\n", gtid, loc_ref,
                  new_taskdata ) );

    return __kmpc_omp_task(loc_ref,gtid,new_task);
}
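/* __kmpc_omp_wait_deps: compiler entry point that blocks the encountering task
   until the listed depend items are fulfilled (used, for instance, for
   undeferred tasks with dependences); other tasks are executed while waiting. */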
void
__kmpc_omp_wait_deps ( ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, kmp_depend_info_t *dep_list,
                       kmp_int32 ndeps_noalias, kmp_depend_info_t *noalias_dep_list )
{
    KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref) );
    if ( ndeps == 0 && ndeps_noalias == 0 ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to wait upon : loc=%p\n",
                      gtid, loc_ref) );
        return;
    }
    kmp_info_t *thread = __kmp_threads[ gtid ];
    kmp_taskdata_t * current_task = thread->th.th_current_task;
    // we can return immediately: dependences are not computed in serial teams
    // (unless proxy tasks were found) and an absent dephash means there is
    // nothing to wait for
    bool ignore = current_task->td_flags.team_serial || current_task->td_flags.tasking_ser || current_task->td_flags.final;
#if OMP_41_ENABLED
    ignore = ignore && thread->th.th_task_team != NULL && thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
#endif
    ignore = ignore || current_task->td_dephash == NULL;
    if ( ignore ) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n",
                      gtid, loc_ref) );
        return;
    }
    kmp_depnode_t node;
    __kmp_init_node(&node);
    if (!__kmp_check_deps( gtid, &node, NULL, current_task->td_dephash, DEP_BARRIER,
                           ndeps, dep_list, ndeps_noalias, noalias_dep_list )) {
        KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking dependencies : loc=%p\n",
                      gtid, loc_ref) );
        return;
    }
    int thread_finished = FALSE;
    kmp_flag_32 flag((volatile kmp_uint32 *)&(node.dn.npredecessors), 0U);
    while ( node.dn.npredecessors > 0 ) {
        flag.execute_tasks(thread, gtid, FALSE, &thread_finished,
#if USE_ITT_BUILD
                           NULL,
#endif
                           __kmp_task_stealing_constraint );
    }
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n", gtid, loc_ref) );
}