LLVM OpenMP* Runtime Library
kmp_cancel.cpp

//===----------------------------------------------------------------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif

#if OMP_40_ENABLED

kmp_int32 __kmpc_cancel(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 cncl_kind) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KC_TRACE(10, ("__kmpc_cancel: T#%d request %d OMP_CANCELLATION=%d\n", gtid,
                cncl_kind, __kmp_omp_cancellation));

  KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
  KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
                   cncl_kind == cancel_sections ||
                   cncl_kind == cancel_taskgroup);
  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  if (__kmp_omp_cancellation) {
    switch (cncl_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections:
      // cancellation requests for parallel and worksharing constructs
      // are handled through the team structure
      {
        kmp_team_t *this_team = this_thr->th.th_team;
        KMP_DEBUG_ASSERT(this_team);
        kmp_int32 old = cancel_noreq;
        this_team->t.t_cancel_request.compare_exchange_strong(old, cncl_kind);
        if (old == cancel_noreq || old == cncl_kind) {
// we do not have a cancellation request in this team or we do have
// one that matches the current request -> cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
          if (ompt_enabled.ompt_callback_cancel) {
            ompt_data_t *task_data;
            __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                          NULL);
            ompt_cancel_flag_t type = ompt_cancel_parallel;
            if (cncl_kind == cancel_parallel)
              type = ompt_cancel_parallel;
            else if (cncl_kind == cancel_loop)
              type = ompt_cancel_loop;
            else if (cncl_kind == cancel_sections)
              type = ompt_cancel_sections;
            ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                task_data, type | ompt_cancel_activated,
                OMPT_GET_RETURN_ADDRESS(0));
          }
#endif
          return 1 /* true */;
        }
        break;
      }
    case cancel_taskgroup:
      // cancellation requests for a task group
      // are handled through the taskgroup structure
      {
        kmp_taskdata_t *task;
        kmp_taskgroup_t *taskgroup;

        task = this_thr->th.th_current_task;
        KMP_DEBUG_ASSERT(task);

        taskgroup = task->td_taskgroup;
        if (taskgroup) {
          kmp_int32 old = cancel_noreq;
          taskgroup->cancel_request.compare_exchange_strong(old, cncl_kind);
          if (old == cancel_noreq || old == cncl_kind) {
// we do not have a cancellation request in this taskgroup or we do
// have one that matches the current request -> cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
            if (ompt_enabled.ompt_callback_cancel) {
              ompt_data_t *task_data;
              __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                            NULL);
              ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                  task_data, ompt_cancel_taskgroup | ompt_cancel_activated,
                  OMPT_GET_RETURN_ADDRESS(0));
            }
#endif
            return 1 /* true */;
          }
        } else {
          // TODO: what needs to happen here?
          // the specification disallows cancellation w/o taskgroups
          // so we might do anything here, let's abort for now
          KMP_ASSERT(0 /* false */);
        }
      }
      break;
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  // ICV OMP_CANCELLATION=false, so we ignored this cancel request
  KMP_DEBUG_ASSERT(!__kmp_omp_cancellation);
  return 0 /* false */;
}
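
// Illustrative sketch (standalone example, not part of this translation unit):
// user-level OpenMP code that reaches the entry point above. A compiler such
// as clang typically lowers `#pragma omp cancel for` to a __kmpc_cancel call
// with cncl_kind == cancel_loop and `#pragma omp cancellation point for` to
// __kmpc_cancellationpoint below; the exact lowering is compiler-specific.
// The request only takes effect when the OMP_CANCELLATION ICV is true,
// matching the __kmp_omp_cancellation check above.
#include <omp.h>
#include <cstdio>

int search(const int *data, int n, int key) {
  int found = -1;
#pragma omp parallel shared(found)
  {
#pragma omp for
    for (int i = 0; i < n; ++i) {
      if (data[i] == key) {
#pragma omp critical
        found = i;
#pragma omp cancel for // request cancellation of the enclosing worksharing loop
      }
#pragma omp cancellation point for // poll for a pending request
    }
  }
  std::printf("cancellation enabled: %d, found index: %d\n",
              omp_get_cancellation(), found);
  return found;
}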

kmp_int32 __kmpc_cancellationpoint(ident_t *loc_ref, kmp_int32 gtid,
                                   kmp_int32 cncl_kind) {
  kmp_info_t *this_thr = __kmp_threads[gtid];

  KC_TRACE(10,
           ("__kmpc_cancellationpoint: T#%d request %d OMP_CANCELLATION=%d\n",
            gtid, cncl_kind, __kmp_omp_cancellation));

  KMP_DEBUG_ASSERT(cncl_kind != cancel_noreq);
  KMP_DEBUG_ASSERT(cncl_kind == cancel_parallel || cncl_kind == cancel_loop ||
                   cncl_kind == cancel_sections ||
                   cncl_kind == cancel_taskgroup);
  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  if (__kmp_omp_cancellation) {
    switch (cncl_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections:
      // cancellation requests for parallel and worksharing constructs
      // are handled through the team structure
      {
        kmp_team_t *this_team = this_thr->th.th_team;
        KMP_DEBUG_ASSERT(this_team);
        if (this_team->t.t_cancel_request) {
          if (cncl_kind == this_team->t.t_cancel_request) {
// the request in the team structure matches the type of
// cancellation point so we can cancel
#if OMPT_SUPPORT && OMPT_OPTIONAL
            if (ompt_enabled.ompt_callback_cancel) {
              ompt_data_t *task_data;
              __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                            NULL);
              ompt_cancel_flag_t type = ompt_cancel_parallel;
              if (cncl_kind == cancel_parallel)
                type = ompt_cancel_parallel;
              else if (cncl_kind == cancel_loop)
                type = ompt_cancel_loop;
              else if (cncl_kind == cancel_sections)
                type = ompt_cancel_sections;
              ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                  task_data, type | ompt_cancel_detected,
                  OMPT_GET_RETURN_ADDRESS(0));
            }
#endif
            return 1 /* true */;
          }
          KMP_ASSERT(0 /* false */);
        } else {
          // we do not have a cancellation request pending, so we just
          // ignore this cancellation point
          return 0;
        }
        break;
      }
    case cancel_taskgroup:
      // cancellation requests for a task group
      // are handled through the taskgroup structure
      {
        kmp_taskdata_t *task;
        kmp_taskgroup_t *taskgroup;

        task = this_thr->th.th_current_task;
        KMP_DEBUG_ASSERT(task);

        taskgroup = task->td_taskgroup;
        if (taskgroup) {
// return the current status of cancellation for the taskgroup
#if OMPT_SUPPORT && OMPT_OPTIONAL
          if (ompt_enabled.ompt_callback_cancel &&
              !!taskgroup->cancel_request) {
            ompt_data_t *task_data;
            __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL,
                                          NULL);
            ompt_callbacks.ompt_callback(ompt_callback_cancel)(
                task_data, ompt_cancel_taskgroup | ompt_cancel_detected,
                OMPT_GET_RETURN_ADDRESS(0));
          }
#endif
          return !!taskgroup->cancel_request;
        } else {
          // if a cancellation point is encountered by a task that does not
          // belong to a taskgroup, it is OK to ignore it
          return 0 /* false */;
        }
      }
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  // ICV OMP_CANCELLATION=false, so we ignore the cancellation point
  KMP_DEBUG_ASSERT(!__kmp_omp_cancellation);
  return 0 /* false */;
}
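
// Illustrative sketch (standalone OMPT tool, not part of this translation
// unit): a minimal first-party tool that receives the ompt_callback_cancel
// invocations dispatched above. The flag constants and callback signature are
// taken from the OpenMP tools interface (<omp-tools.h>); the names prefixed
// with demo_ are made up for this example, and details of tool registration
// may differ between OMPT revisions.
#include <omp-tools.h>
#include <cstdio>

static void demo_cancel_callback(ompt_data_t *task_data, int flags,
                                 const void *codeptr_ra) {
  // decode which construct was cancelled and whether this thread activated
  // the request or merely detected it at a cancellation point
  const char *what = (flags & ompt_cancel_parallel)    ? "parallel"
                     : (flags & ompt_cancel_sections)  ? "sections"
                     : (flags & ompt_cancel_loop)      ? "loop"
                     : (flags & ompt_cancel_taskgroup) ? "taskgroup"
                                                       : "unknown";
  const char *how = (flags & ompt_cancel_activated) ? "activated" : "detected";
  std::printf("cancel %s (%s) at %p\n", what, how, codeptr_ra);
}

static int demo_initialize(ompt_function_lookup_t lookup,
                           int initial_device_num, ompt_data_t *tool_data) {
  ompt_set_callback_t set_callback =
      (ompt_set_callback_t)lookup("ompt_set_callback");
  set_callback(ompt_callback_cancel, (ompt_callback_t)demo_cancel_callback);
  return 1; // keep the tool active
}

static void demo_finalize(ompt_data_t *tool_data) {}

extern "C" ompt_start_tool_result_t *
ompt_start_tool(unsigned int omp_version, const char *runtime_version) {
  static ompt_start_tool_result_t result = {demo_initialize, demo_finalize,
                                            {0}};
  return &result;
}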

kmp_int32 __kmpc_cancel_barrier(ident_t *loc, kmp_int32 gtid) {
  int ret = 0 /* false */;
  kmp_info_t *this_thr = __kmp_threads[gtid];
  kmp_team_t *this_team = this_thr->th.th_team;

  KMP_DEBUG_ASSERT(__kmp_get_gtid() == gtid);

  // call into the standard barrier
  __kmpc_barrier(loc, gtid);

  // if cancellation is active, check cancellation flag
  if (__kmp_omp_cancellation) {
    // depending on which construct to cancel, check the flag and
    // reset the flag
    switch (KMP_ATOMIC_LD_RLX(&(this_team->t.t_cancel_request))) {
    case cancel_parallel:
      ret = 1;
      // ensure that threads have checked the flag, when
      // leaving the above barrier
      __kmpc_barrier(loc, gtid);
      this_team->t.t_cancel_request = cancel_noreq;
      // the next barrier is the fork/join barrier, which
      // synchronizes the threads leaving here
      break;
    case cancel_loop:
    case cancel_sections:
      ret = 1;
      // ensure that threads have checked the flag, when
      // leaving the above barrier
      __kmpc_barrier(loc, gtid);
      this_team->t.t_cancel_request = cancel_noreq;
      // synchronize the threads again to make sure we do not have any run-away
      // threads that cause a race on the cancellation flag
      __kmpc_barrier(loc, gtid);
      break;
    case cancel_taskgroup:
      // this case should not occur
      KMP_ASSERT(0 /* false */);
      break;
    case cancel_noreq:
      // do nothing
      break;
    default:
      KMP_ASSERT(0 /* false */);
    }
  }

  return ret;
}
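
// Illustrative sketch (standalone example, not part of this translation unit):
// a parallel region that is cancelled as a whole. Barriers inside a
// cancellable region are cancellation points; a compiler may lower them to
// __kmpc_cancel_barrier above so that every thread observes the team-wide
// request and proceeds to the end of the region. With OMP_CANCELLATION=false
// the request is ignored and all threads print the message after the barrier.
#include <omp.h>
#include <cstdio>

int main() {
#pragma omp parallel
  {
    if (omp_get_thread_num() == 0) {
#pragma omp cancel parallel // request cancellation of the whole team
    }
#pragma omp barrier // cancellation point; may become a __kmpc_cancel_barrier
    std::printf("T#%d passed the barrier\n", omp_get_thread_num());
  }
  return 0;
}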

int __kmp_get_cancellation_status(int cancel_kind) {
  if (__kmp_omp_cancellation) {
    kmp_info_t *this_thr = __kmp_entry_thread();

    switch (cancel_kind) {
    case cancel_parallel:
    case cancel_loop:
    case cancel_sections: {
      kmp_team_t *this_team = this_thr->th.th_team;
      return this_team->t.t_cancel_request == cancel_kind;
    }
    case cancel_taskgroup: {
      kmp_taskdata_t *task;
      kmp_taskgroup_t *taskgroup;
      task = this_thr->th.th_current_task;
      taskgroup = task->td_taskgroup;
      return taskgroup && taskgroup->cancel_request;
    }
    }
  }

  return 0 /* false */;
}
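
// Illustrative sketch (standalone example, not part of this translation unit):
// taskgroup cancellation at the user level. The `cancel taskgroup` request is
// recorded in the kmp_taskgroup_t::cancel_request field that
// __kmp_get_cancellation_status inspects above, and each
// `cancellation point taskgroup` polls it through __kmpc_cancellationpoint.
#include <omp.h>
#include <cstdio>

void run_tasks(int n) {
#pragma omp parallel
#pragma omp single
  {
#pragma omp taskgroup
    {
      for (int i = 0; i < n; ++i) {
#pragma omp task firstprivate(i)
        {
          if (i == 7) {
#pragma omp cancel taskgroup // cancel remaining tasks of this taskgroup
          }
#pragma omp cancellation point taskgroup
          std::printf("task %d ran to completion\n", i);
        }
      }
    }
  }
}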

#endif // OMP_40_ENABLED