#ifndef KMP_TASKDEPS_H
#define KMP_TASKDEPS_H

#include "kmp.h"
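
// Helpers to acquire/release the lock embedded in a dependence node while
// its successor list is read or updated.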
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))
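
// Drop one reference to a dependence node; the last release frees it.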
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
  if (n == 0) {
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}
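
// Free a dependence-node list, dereferencing each referenced node and
// releasing the list cells themselves.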
static inline void __kmp_depnode_list_free(kmp_info_t *thread,
                                           kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next;

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}
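
// Free all entries of the dependence hash: each entry's dependence lists,
// its last-out node, and its mutexinoutset lock, if any.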
static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
                                              kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket;
        __kmp_depnode_list_free(thread, entry->last_set);
        __kmp_depnode_list_free(thread, entry->prev_set);
        __kmp_node_deref(thread, entry->last_out);
        if (entry->mtx_lock) {
          __kmp_destroy_lock(entry->mtx_lock);
          __kmp_free(entry->mtx_lock);
        }
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = 0;
    }
  }
}
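
// Release all entries, then free the hash table itself.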
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}
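
// Hand a task over to another thread's queue (implementation lives
// elsewhere in the tasking runtime); used below to route successors of
// hidden helper tasks back to their encountering team.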
extern void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start);
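
// Release the dependencies of a completed task: drop any mutexinoutset
// locks, free the dependence hash, and decrement each successor's
// predecessor count, scheduling those that become ready.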
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;
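
  // Check mutexinoutset dependencies, release locks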
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  if (task->td_dephash) {
    KA_TRACE(
        40,
        ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
         gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
  node->dn.task =
      NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

  kmp_depnode_list_t *next;
  kmp_taskdata_t *next_taskdata;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;
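
    // successor task can be NULL for wait_depends or because deps are still
    // being processed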
    if (npredecessors == 0) {
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
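        // When a hidden helper task completes, a regular successor task must
        // be handed back to its encountering team rather than run on the
        // hidden helper thread.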
        if (KMP_HIDDEN_HELPER_THREAD(gtid)) {
          // Hidden helper threads can only execute hidden helper tasks
          KMP_ASSERT(task->td_flags.hidden_helper);
          next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task);
          if (!next_taskdata->td_flags.hidden_helper) {
            __kmpc_give_task(
                successor->dn.task,
                __kmp_tid_from_gtid(next_taskdata->encountering_gtid));
          } else {
            __kmp_omp_task(gtid, successor->dn.task, false);
          }
        } else {
          __kmp_omp_task(gtid, successor->dn.task, false);
        }
      }
    }

    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}

#endif // KMP_TASKDEPS_H