LLVM OpenMP* Runtime Library
kmp_taskdeps.h
1 /*
2  * kmp_taskdeps.h
3  */
4 
5 
6 //===----------------------------------------------------------------------===//
7 //
8 // The LLVM Compiler Infrastructure
9 //
10 // This file is dual licensed under the MIT and the University of Illinois Open
11 // Source Licenses. See LICENSE.txt for details.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 
16 #ifndef KMP_TASKDEPS_H
17 #define KMP_TASKDEPS_H
18 
19 #include "kmp.h"
20 
21 #if OMP_40_ENABLED
22 
23 #define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
24 #define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))
25 
26 static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
27  if (!node)
28  return;
29 
30  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
31  if (n == 0) {
32  KMP_ASSERT(node->dn.nrefs == 0);
33 #if USE_FAST_MEMORY
34  __kmp_fast_free(thread, node);
35 #else
36  __kmp_thread_free(thread, node);
37 #endif
38  }
39 }
40 
41 static inline void __kmp_depnode_list_free(kmp_info_t *thread,
42  kmp_depnode_list *list) {
43  kmp_depnode_list *next;
44 
45  for (; list; list = next) {
46  next = list->next;
47 
48  __kmp_node_deref(thread, list->node);
49 #if USE_FAST_MEMORY
50  __kmp_fast_free(thread, list);
51 #else
52  __kmp_thread_free(thread, list);
53 #endif
54  }
55 }
56 
57 static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
58  kmp_dephash_t *h) {
59  for (size_t i = 0; i < h->size; i++) {
60  if (h->buckets[i]) {
61  kmp_dephash_entry_t *next;
62  for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
63  next = entry->next_in_bucket;
64  __kmp_depnode_list_free(thread, entry->last_ins);
65  __kmp_node_deref(thread, entry->last_out);
66 #if USE_FAST_MEMORY
67  __kmp_fast_free(thread, entry);
68 #else
69  __kmp_thread_free(thread, entry);
70 #endif
71  }
72  h->buckets[i] = 0;
73  }
74  }
75 }
76 
// Free a task's dependence hash: first release every entry (and the node
// references they hold), then free the table structure itself.
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}
85 
// Called on task completion: free the task's dependence hash, then notify
// every successor in this task's depnode by decrementing its predecessor
// count, scheduling any successor that reaches zero. The lock/atomic
// ordering below is deliberate — do not reorder.
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // The dependence hash is per-task bookkeeping for *its child tasks'*
  // dependences; it is freed unconditionally, even when this task has no
  // depnode of its own.
  if (task->td_dephash) {
    KA_TRACE(
        40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
             gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  // No depnode means no task depended on this one — nothing to notify.
  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  // Clearing dn.task must happen under the depnode lock so that a thread
  // concurrently adding a dependence on this node sees either a live task
  // (and links in) or NULL (and treats the dependence as satisfied).
  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    // KMP_ATOMIC_DEC is fetch-and-decrement; "- 1" yields the count of
    // predecessors still outstanding after this one is released.
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
      // Full barrier before reading dn.task so we observe the value the
      // thread that registered the dependence published.
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        __kmp_omp_task(gtid, successor->dn.task, false);
      }
    }

    // Save the link before the list cell is freed, then drop the reference
    // this cell held on the successor node.
    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  // Drop this task's own reference on its depnode (frees it if last).
  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}
142 
143 #endif // OMP_40_ENABLED
144 
145 #endif // KMP_TASKDEPS_H