LLVM OpenMP* Runtime Library
kmp_taskdeps.cpp
1 /*
2  * kmp_taskdeps.cpp
3  */
4 
5 //===----------------------------------------------------------------------===//
6 //
7 // The LLVM Compiler Infrastructure
8 //
9 // This file is dual licensed under the MIT and the University of Illinois Open
10 // Source Licenses. See LICENSE.txt for details.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 //#define KMP_SUPPORT_GRAPH_OUTPUT 1
15 
16 #include "kmp.h"
17 #include "kmp_io.h"
18 #include "kmp_wait_release.h"
19 #include "kmp_taskdeps.h"
20 #if OMPT_SUPPORT
21 #include "ompt-specific.h"
22 #endif
23 
24 #if OMP_40_ENABLED
25 
26 // TODO: Improve memory allocation? keep a list of pre-allocated structures?
27 // allocate in blocks? re-use finished list entries?
28 // TODO: don't use atomic ref counters for stack-allocated nodes.
29 // TODO: find an alternate to atomic refs for heap-allocated nodes?
30 // TODO: Finish graph output support
31 // TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
32 // runtime locks
33 // TODO: Any ITT support needed?
34 
35 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
36 static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
37 #endif
38 
39 static void __kmp_init_node(kmp_depnode_t *node) {
40  node->dn.task = NULL; // set to null initially; it will point to the right
41  // task once dependences have been processed
42  node->dn.successors = NULL;
43  __kmp_init_lock(&node->dn.lock);
44  KMP_ATOMIC_ST_RLX(&node->dn.nrefs,
45  1); // init creates the first reference to the node
46 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
47  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
48 #endif
49 }
50 
51 static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
52  KMP_ATOMIC_INC(&node->dn.nrefs);
53  return node;
54 }
55 
56 enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };
57 
58 static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
59  // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) %
60  // m_num_sets );
61  return ((addr >> 6) ^ (addr >> 2)) % hsize;
62 }
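The hash simply folds the dependence base address into a bucket index. A minimal standalone sketch of the same computation (the array and its addresses below are hypothetical, not part of the runtime):

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Same folding as __kmp_dephash_hash: xor two shifted copies of the address,
// then reduce modulo the table size.
static int bucket_of(std::uintptr_t addr, std::size_t hsize) {
  return (int)(((addr >> 6) ^ (addr >> 2)) % hsize);
}

int main() {
  double a[2];
  std::printf("&a[0] -> bucket %d of 97\n", bucket_of((std::uintptr_t)&a[0], 97));
  std::printf("&a[1] -> bucket %d of 97\n", bucket_of((std::uintptr_t)&a[1], 97));
  return 0;
}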
63 
64 static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
65  kmp_taskdata_t *current_task) {
66  kmp_dephash_t *h;
67 
68  size_t h_size;
69 
70  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
71  h_size = KMP_DEPHASH_MASTER_SIZE;
72  else
73  h_size = KMP_DEPHASH_OTHER_SIZE;
74 
75  kmp_int32 size =
76  h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);
77 
78 #if USE_FAST_MEMORY
79  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
80 #else
81  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
82 #endif
83  h->size = h_size;
84 
85 #ifdef KMP_DEBUG
86  h->nelements = 0;
87  h->nconflicts = 0;
88 #endif
89  h->buckets = (kmp_dephash_entry **)(h + 1);
90 
91  for (size_t i = 0; i < h_size; i++)
92  h->buckets[i] = 0;
93 
94  return h;
95 }
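Note that the table header and its bucket array come from a single allocation: h->buckets is pointed just past the header (h + 1). A reduced sketch of that layout trick, with generic names rather than the runtime's types:

#include <cstdlib>
#include <cstring>

struct table {
  std::size_t size;
  void **buckets; // points just past this header, into the same block
};

static table *table_create(std::size_t nbuckets) {
  std::size_t bytes = sizeof(table) + nbuckets * sizeof(void *);
  table *t = (table *)std::malloc(bytes);
  t->size = nbuckets;
  t->buckets = (void **)(t + 1); // header and buckets share one allocation
  std::memset(t->buckets, 0, nbuckets * sizeof(void *));
  return t;
}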
96 
97 static kmp_dephash_entry *
98 __kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t *h, kmp_intptr_t addr) {
99  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);
100 
101  kmp_dephash_entry_t *entry;
102  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
103  if (entry->addr == addr)
104  break;
105 
106  if (entry == NULL) {
107 // create entry. This is only done by one thread so no locking required
108 #if USE_FAST_MEMORY
109  entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
110  thread, sizeof(kmp_dephash_entry_t));
111 #else
112  entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
113  thread, sizeof(kmp_dephash_entry_t));
114 #endif
115  entry->addr = addr;
116  entry->last_out = NULL;
117  entry->last_ins = NULL;
118  entry->next_in_bucket = h->buckets[bucket];
119  h->buckets[bucket] = entry;
120 #ifdef KMP_DEBUG
121  h->nelements++;
122  if (entry->next_in_bucket)
123  h->nconflicts++;
124 #endif
125  }
126  return entry;
127 }
128 
129 static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
130  kmp_depnode_list_t *list,
131  kmp_depnode_t *node) {
132  kmp_depnode_list_t *new_head;
133 
134 #if USE_FAST_MEMORY
135  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
136  thread, sizeof(kmp_depnode_list_t));
137 #else
138  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
139  thread, sizeof(kmp_depnode_list_t));
140 #endif
141 
142  new_head->node = __kmp_node_ref(node);
143  new_head->next = list;
144 
145  return new_head;
146 }
147 
148 static inline void __kmp_track_dependence(kmp_depnode_t *source,
149  kmp_depnode_t *sink,
150  kmp_task_t *sink_task) {
151 #ifdef KMP_SUPPORT_GRAPH_OUTPUT
152  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
153  // do not use sink->dn.task as that is only filled after the dependencies
154  // are already processed!
155  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);
156 
157  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
158  task_source->td_ident->psource, sink->dn.id,
159  task_sink->td_ident->psource);
160 #endif
161 #if OMPT_SUPPORT && OMPT_OPTIONAL
162  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
163  task a blocks the execution of b through the
164  ompt_callback_task_dependence callback */
165  if (ompt_enabled.ompt_callback_task_dependence) {
166  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
167  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);
168 
169  ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
170  &(task_source->ompt_task_info.task_data),
171  &(task_sink->ompt_task_info.task_data));
172  }
173 #endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
174 }
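When OMPT is compiled in and a tool registers ompt_callback_task_dependence, every edge recorded here is reported to the tool. A sketch of the tool-side callback only (the header name and print format are assumptions; a real tool registers the callback with ompt_set_callback during its ompt_initialize, which is omitted here):

#include <omp-tools.h> // older trees ship this header as ompt.h
#include <cstdio>

// Invoked once per tracked edge: src must complete before sink may start.
static void on_task_dependence(ompt_data_t *src_task_data,
                               ompt_data_t *sink_task_data) {
  std::printf("dependence: %p -> %p\n", (void *)src_task_data,
              (void *)sink_task_data);
}

// During tool initialization, roughly:
//   ompt_set_callback(ompt_callback_task_dependence,
//                     (ompt_callback_t)&on_task_dependence);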
175 
176 template <bool filter>
177 static inline kmp_int32
178 __kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *hash,
179  bool dep_barrier, kmp_int32 ndeps,
180  kmp_depend_info_t *dep_list, kmp_task_t *task) {
181  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
182  "dep_barrier = %d\n",
183  filter, gtid, ndeps, dep_barrier));
184 
185  kmp_info_t *thread = __kmp_threads[gtid];
186  kmp_int32 npredecessors = 0;
187  for (kmp_int32 i = 0; i < ndeps; i++) {
188  const kmp_depend_info_t *dep = &dep_list[i];
189 
190  KMP_DEBUG_ASSERT(dep->flags.in);
191 
192  if (filter && dep->base_addr == 0)
193  continue; // skip filtered entries
194 
195  kmp_dephash_entry_t *info =
196  __kmp_dephash_find(thread, hash, dep->base_addr);
197  kmp_depnode_t *last_out = info->last_out;
198 
199  if (dep->flags.out && info->last_ins) {
200  for (kmp_depnode_list_t *p = info->last_ins; p; p = p->next) {
201  kmp_depnode_t *indep = p->node;
202  if (indep->dn.task) {
203  KMP_ACQUIRE_DEPNODE(gtid, indep);
204  if (indep->dn.task) {
205  __kmp_track_dependence(indep, node, task);
206  indep->dn.successors =
207  __kmp_add_node(thread, indep->dn.successors, node);
208  KA_TRACE(40, ("__kmp_process_deps<%d>: T#%d adding dependence from "
209  "%p to %p\n",
210  filter, gtid, KMP_TASK_TO_TASKDATA(indep->dn.task),
211  KMP_TASK_TO_TASKDATA(task)));
212  npredecessors++;
213  }
214  KMP_RELEASE_DEPNODE(gtid, indep);
215  }
216  }
217 
218  __kmp_depnode_list_free(thread, info->last_ins);
219  info->last_ins = NULL;
220 
221  } else if (last_out && last_out->dn.task) {
222  KMP_ACQUIRE_DEPNODE(gtid, last_out);
223  if (last_out->dn.task) {
224  __kmp_track_dependence(last_out, node, task);
225  last_out->dn.successors =
226  __kmp_add_node(thread, last_out->dn.successors, node);
227  KA_TRACE(
228  40,
229  ("__kmp_process_deps<%d>: T#%d adding dependence from %p to %p\n",
230  filter, gtid, KMP_TASK_TO_TASKDATA(last_out->dn.task),
231  KMP_TASK_TO_TASKDATA(task)));
232 
233  npredecessors++;
234  }
235  KMP_RELEASE_DEPNODE(gtid, last_out);
236  }
237 
238  if (dep_barrier) {
239  // if this is a sync point in the serial sequence, then the previous
240  // outputs are guaranteed to have completed by the time this task
241  // executes, so the previous output nodes can be cleared.
242  __kmp_node_deref(thread, last_out);
243  info->last_out = NULL;
244  } else {
245  if (dep->flags.out) {
246  __kmp_node_deref(thread, last_out);
247  info->last_out = __kmp_node_ref(node);
248  } else
249  info->last_ins = __kmp_add_node(thread, info->last_ins, node);
250  }
251  }
252 
253  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
254  gtid, npredecessors));
255 
256  return npredecessors;
257 }
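The two branches above map directly onto the usual source-level patterns: readers become successors of the last writer (last_out), and a subsequent writer picks up every queued reader (last_ins) as a predecessor. An illustrative user program exercising exactly that shape (hypothetical, built with -fopenmp):

#include <cstdio>

int main() {
  int x = 0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : x) // becomes last_out for &x
    x = 1;
#pragma omp task depend(in : x) // successor of the first writer
    std::printf("reader 1 sees %d\n", x);
#pragma omp task depend(in : x) // also queued on last_ins
    std::printf("reader 2 sees %d\n", x);
#pragma omp task depend(out : x) // waits for both readers (last_ins branch)
    x = 2;
#pragma omp taskwait
  }
  std::printf("final x = %d\n", x);
  return 0;
}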
258 
259 #define NO_DEP_BARRIER (false)
260 #define DEP_BARRIER (true)
261 
262 // returns true if the task has any outstanding dependence
263 static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
264  kmp_task_t *task, kmp_dephash_t *hash,
265  bool dep_barrier, kmp_int32 ndeps,
266  kmp_depend_info_t *dep_list,
267  kmp_int32 ndeps_noalias,
268  kmp_depend_info_t *noalias_dep_list) {
269  int i;
270 
271 #if KMP_DEBUG
272  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
273 #endif
274  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
275  "possibly aliased dependencies, %d non-aliased depedencies : "
276  "dep_barrier=%d .\n",
277  gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));
278 
279  // Filter deps in dep_list
280  // TODO: Different algorithm for large dep_list ( > 10 ? )
281  for (i = 0; i < ndeps; i++) {
282  if (dep_list[i].base_addr != 0)
283  for (int j = i + 1; j < ndeps; j++)
284  if (dep_list[i].base_addr == dep_list[j].base_addr) {
285  dep_list[i].flags.in |= dep_list[j].flags.in;
286  dep_list[i].flags.out |= dep_list[j].flags.out;
287  dep_list[j].base_addr = 0; // Mark j element as void
288  }
289  }
290 
291  // doesn't need to be atomic as no other thread is going to be accessing this
292  // node just yet.
293  // npredecessors is set to -1 to ensure that none of the releasing tasks queues
294  // this task before we have finished processing all the dependencies
295  node->dn.npredecessors = -1;
296 
297  // used to pack all npredecessors additions into a single atomic operation at
298  // the end
299  int npredecessors;
300 
301  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
302  dep_list, task);
303  npredecessors += __kmp_process_deps<false>(
304  gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
305 
306  node->dn.task = task;
307  KMP_MB();
308 
309  // Account for our initial fake value
310  npredecessors++;
311 
312  // Update predecessors and obtain current value to check if there are still
313  // any outstanding dependences (some tasks may have finished while we processed
314  // the dependences)
315  npredecessors =
316  node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;
317 
318  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
319  gtid, npredecessors, taskdata));
320 
321  // beyond this point the task could be queued (and executed) by a releasing
322  // task...
323  return npredecessors > 0 ? true : false;
324 }
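The -1 placeholder combined with the single fetch_add at the end is a self-contained idiom: predecessors that finish while the dependence lists are still being walked may decrement the counter, but the count cannot reach zero (and the task cannot be queued) until the creating thread has added its full contribution, including one for the placeholder itself. A reduced sketch of just that counting idiom (names are illustrative, not the runtime's):

#include <atomic>

std::atomic<int> npredecessors;

// Block early release while dependences are still being recorded.
void begin_registration() { npredecessors.store(-1); }

// Called by each completing predecessor; releases the task when the count
// drops to zero.
bool predecessor_done() { return npredecessors.fetch_sub(1) == 1; }

// Called once by the creating thread after all edges are recorded.
// Returns true if the task still has outstanding predecessors.
bool finish_registration(int found) {
  int total = found + 1; // +1 cancels the -1 placeholder
  int now = npredecessors.fetch_add(total) + total;
  return now > 0;
}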
325 
342 kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
343  kmp_task_t *new_task, kmp_int32 ndeps,
344  kmp_depend_info_t *dep_list,
345  kmp_int32 ndeps_noalias,
346  kmp_depend_info_t *noalias_dep_list) {
347 
348  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
349  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
350  loc_ref, new_taskdata));
351 
352  kmp_info_t *thread = __kmp_threads[gtid];
353  kmp_taskdata_t *current_task = thread->th.th_current_task;
354 
355 #if OMPT_SUPPORT
356  if (ompt_enabled.enabled) {
357  OMPT_STORE_RETURN_ADDRESS(gtid);
358  if (!current_task->ompt_task_info.frame.enter_frame)
359  current_task->ompt_task_info.frame.enter_frame =
360  OMPT_GET_FRAME_ADDRESS(1);
361  if (ompt_enabled.ompt_callback_task_create) {
362  ompt_data_t task_data = ompt_data_none;
363  ompt_callbacks.ompt_callback(ompt_callback_task_create)(
364  current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
365  current_task ? &(current_task->ompt_task_info.frame) : NULL,
366  &(new_taskdata->ompt_task_info.task_data),
367  ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
368  OMPT_LOAD_RETURN_ADDRESS(gtid));
369  }
370 
371  new_taskdata->ompt_task_info.frame.enter_frame = OMPT_GET_FRAME_ADDRESS(0);
372  }
373 
374 #if OMPT_OPTIONAL
375  /* OMPT: grab all dependences if requested by the tool */
376  if (ndeps + ndeps_noalias > 0 &&
377  ompt_enabled.ompt_callback_task_dependences) {
378  kmp_int32 i;
379 
380  new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
381  new_taskdata->ompt_task_info.deps =
382  (ompt_task_dependence_t *)KMP_OMPT_DEPS_ALLOC(
383  thread, (ndeps + ndeps_noalias) * sizeof(ompt_task_dependence_t));
384 
385  KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);
386 
387  for (i = 0; i < ndeps; i++) {
388  new_taskdata->ompt_task_info.deps[i].variable_addr =
389  (void *)dep_list[i].base_addr;
390  if (dep_list[i].flags.in && dep_list[i].flags.out)
391  new_taskdata->ompt_task_info.deps[i].dependence_type =
392  ompt_task_dependence_type_inout;
393  else if (dep_list[i].flags.out)
394  new_taskdata->ompt_task_info.deps[i].dependence_type =
395  ompt_task_dependence_type_out;
396  else if (dep_list[i].flags.in)
397  new_taskdata->ompt_task_info.deps[i].dependence_type =
398  ompt_task_dependence_type_in;
399  }
400  for (i = 0; i < ndeps_noalias; i++) {
401  new_taskdata->ompt_task_info.deps[ndeps + i].variable_addr =
402  (void *)noalias_dep_list[i].base_addr;
403  if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
404  new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
405  ompt_task_dependence_type_inout;
406  else if (noalias_dep_list[i].flags.out)
407  new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
408  ompt_task_dependence_type_out;
409  else if (noalias_dep_list[i].flags.in)
410  new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
411  ompt_task_dependence_type_in;
412  }
413  ompt_callbacks.ompt_callback(ompt_callback_task_dependences)(
414  &(new_taskdata->ompt_task_info.task_data),
415  new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
416  /* We can now free the allocated memory for the dependencies */
417  /* For OMPD we might want to delay the free until task_end */
418  KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
419  new_taskdata->ompt_task_info.deps = NULL;
420  new_taskdata->ompt_task_info.ndeps = 0;
421  }
422 #endif /* OMPT_OPTIONAL */
423 #endif /* OMPT_SUPPORT */
424 
425  bool serial = current_task->td_flags.team_serial ||
426  current_task->td_flags.tasking_ser ||
427  current_task->td_flags.final;
428 #if OMP_45_ENABLED
429  kmp_task_team_t *task_team = thread->th.th_task_team;
430  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
431 #endif
432 
433  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
434  /* if no dependencies have been tracked yet, create the dependence hash */
435  if (current_task->td_dephash == NULL)
436  current_task->td_dephash = __kmp_dephash_create(thread, current_task);
437 
438 #if USE_FAST_MEMORY
439  kmp_depnode_t *node =
440  (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
441 #else
442  kmp_depnode_t *node =
443  (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
444 #endif
445 
446  __kmp_init_node(node);
447  new_taskdata->td_depnode = node;
448 
449  if (__kmp_check_deps(gtid, node, new_task, current_task->td_dephash,
450  NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
451  noalias_dep_list)) {
452  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
453  "dependencies: "
454  "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
455  gtid, loc_ref, new_taskdata));
456 #if OMPT_SUPPORT
457  if (ompt_enabled.enabled) {
458  current_task->ompt_task_info.frame.enter_frame = NULL;
459  }
460 #endif
461  return TASK_CURRENT_NOT_QUEUED;
462  }
463  } else {
464  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
465  "for task (serialized)"
466  "loc=%p task=%p\n",
467  gtid, loc_ref, new_taskdata));
468  }
469 
470  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
471  "dependencies : "
472  "loc=%p task=%p, transferring to __kmpc_omp_task\n",
473  gtid, loc_ref, new_taskdata));
474 
475  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
476 #if OMPT_SUPPORT
477  if (ompt_enabled.enabled) {
478  current_task->ompt_task_info.frame.enter_frame = NULL;
479  }
480 #endif
481  return ret;
482 }
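At the source level this entry point backs the depend clause on a deferred task. Exact code generation is compiler-specific, but roughly: the front end allocates the task, fills a kmp_depend_info_t array from the depend clause, and calls __kmpc_omp_task_with_deps instead of __kmpc_omp_task. A hedged user-level illustration (built with -fopenmp; the call shown in the comment is a schematic, not actual codegen):

#include <cstdio>

int main() {
  int x = 0;
#pragma omp parallel
#pragma omp single
  {
    // The compiler allocates the task, builds a one-element dependence array
    // describing &x with flags.in = flags.out = 1, and then calls something
    // like:
    //   __kmpc_omp_task_with_deps(loc, gtid, new_task,
    //                             /*ndeps=*/1, dep_list,
    //                             /*ndeps_noalias=*/0, NULL);
#pragma omp task depend(inout : x)
    x += 1;
#pragma omp taskwait
  }
  std::printf("x = %d\n", x);
  return 0;
}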
483 
495 void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
496  kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
497  kmp_depend_info_t *noalias_dep_list) {
498  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));
499 
500  if (ndeps == 0 && ndeps_noalias == 0) {
501  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
502  "wait upon : loc=%p\n",
503  gtid, loc_ref));
504  return;
505  }
506 
507  kmp_info_t *thread = __kmp_threads[gtid];
508  kmp_taskdata_t *current_task = thread->th.th_current_task;
509 
510  // We can return immediately as:
511  // - dependences are not computed in serial teams (except with proxy tasks)
512  // - if the dephash has not been created yet, there is nothing to wait for
513  bool ignore = current_task->td_flags.team_serial ||
514  current_task->td_flags.tasking_ser ||
515  current_task->td_flags.final;
516 #if OMP_45_ENABLED
517  ignore = ignore && thread->th.th_task_team != NULL &&
518  thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
519 #endif
520  ignore = ignore || current_task->td_dephash == NULL;
521 
522  if (ignore) {
523  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
524  "dependencies : loc=%p\n",
525  gtid, loc_ref));
526  return;
527  }
528 
529  kmp_depnode_t node = {0};
530  __kmp_init_node(&node);
531 
532  if (!__kmp_check_deps(gtid, &node, NULL, current_task->td_dephash,
533  DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
534  noalias_dep_list)) {
535  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
536  "dependencies : loc=%p\n",
537  gtid, loc_ref));
538  return;
539  }
540 
541  int thread_finished = FALSE;
542  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
543  while (node.dn.npredecessors > 0) {
544  flag.execute_tasks(thread, gtid, FALSE,
545  &thread_finished USE_ITT_BUILD_ARG(NULL),
546  __kmp_task_stealing_constraint);
547  }
548 
549  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
550  gtid, loc_ref));
551 }
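One way this entry point is reached is an undeferred task with dependences, e.g. a depend clause combined with if(0): the encountering thread must wait until the listed dependences are satisfied before running the body inline. A hedged illustration (built with -fopenmp; whether the compiler emits __kmpc_omp_wait_deps here is an assumption about typical codegen):

#include <cstdio>

int main() {
  int x = 0;
#pragma omp parallel
#pragma omp single
  {
#pragma omp task depend(out : x)
    x = 42;

    // Undeferred task: the encountering thread blocks here (possibly
    // executing other tasks meanwhile) until the writer above has
    // completed, then runs the body inline.
#pragma omp task if (0) depend(in : x)
    std::printf("x = %d\n", x);
  }
  return 0;
}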
552 
553 #endif /* OMP_40_ENABLED */