extern kmp_omp_struct_info_t __kmp_omp_debug_struct_info;
int __kmp_debugging = FALSE; // Whether we are currently debugging the OpenMP RTL.
#define offset_and_size_of(structure, field)                                   \
  { offsetof(structure, field), sizeof(((structure *)NULL)->field) }

#define offset_and_size_not_available                                          \
  { -1, -1 } // Sentinel values for fields that are not exposed here.

#define addr_and_size_of(var)                                                  \
  { (kmp_uint64)(&var), sizeof(var) }

#define nthr_buffer_size 1024
static kmp_int32 kmp_omp_nthr_info_buffer[nthr_buffer_size] = {
    nthr_buffer_size * sizeof(kmp_int32)};
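// Names of runtime entry points whose addresses are exported to the debugger
// through __kmp_omp_debug_struct_info below.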
static char func_microtask[] = "__kmp_invoke_microtask";
static char func_fork[] = "__kmpc_fork_call";
static char func_fork_teams[] = "__kmpc_fork_teams";
// Various info about runtime structures: addresses, field offsets, sizes, etc.
kmp_omp_struct_info_t __kmp_omp_debug_struct_info = {
    sizeof(kmp_omp_struct_info_t),
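    // OpenMP RTL version information.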
    addr_and_size_of(__kmp_version_major),
    addr_and_size_of(__kmp_version_minor),
    addr_and_size_of(__kmp_version_build),
    addr_and_size_of(__kmp_openmp_version),
    {(kmp_uint64)(__kmp_copyright) + KMP_VERSION_MAGIC_LEN,
     0}, // Skip the version magic prefix of the copyright string.
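    // Addresses of various globals.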
    addr_and_size_of(__kmp_threads),
    addr_and_size_of(__kmp_root),
    addr_and_size_of(__kmp_threads_capacity),
#if KMP_USE_MONITOR
    addr_and_size_of(__kmp_monitor),
#endif
#if !KMP_USE_DYNAMIC_LOCK
    addr_and_size_of(__kmp_user_lock_table),
#endif
    addr_and_size_of(func_microtask),
    addr_and_size_of(func_fork),
    addr_and_size_of(func_fork_teams),
    addr_and_size_of(__kmp_team_counter),
    addr_and_size_of(__kmp_task_counter),
    addr_and_size_of(kmp_omp_nthr_info_buffer),
    OMP_LOCK_T_SIZE < sizeof(void *),
    INITIAL_TASK_DEQUE_SIZE,
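    // Thread structure (kmp_base_info_t) field offsets.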
    sizeof(kmp_base_info_t),
    offset_and_size_of(kmp_base_info_t, th_info),
    offset_and_size_of(kmp_base_info_t, th_team),
    offset_and_size_of(kmp_base_info_t, th_root),
    offset_and_size_of(kmp_base_info_t, th_serial_team),
    offset_and_size_of(kmp_base_info_t, th_ident),
    offset_and_size_of(kmp_base_info_t, th_spin_here),
    offset_and_size_of(kmp_base_info_t, th_next_waiting),
    offset_and_size_of(kmp_base_info_t, th_task_team),
    offset_and_size_of(kmp_base_info_t, th_current_task),
    offset_and_size_of(kmp_base_info_t, th_task_state),
    offset_and_size_of(kmp_base_info_t, th_bar),
    offset_and_size_of(kmp_bstate_t, b_worker_arrived),
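    // Teams construct information.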
    offset_and_size_of(kmp_base_info_t, th_teams_microtask),
    offset_and_size_of(kmp_base_info_t, th_teams_level),
    offset_and_size_of(kmp_teams_size_t, nteams),
    offset_and_size_of(kmp_teams_size_t, nth),
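    // Thread descriptor (kmp_desc_base_t) field offsets.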
    sizeof(kmp_desc_base_t),
    offset_and_size_of(kmp_desc_base_t, ds_tid),
    offset_and_size_of(kmp_desc_base_t, ds_gtid),
// On Windows the usable identifier is the thread id, not the thread handle.
#if KMP_OS_WINDOWS
    offset_and_size_of(kmp_desc_base_t, ds_thread_id),
#else
    offset_and_size_of(kmp_desc_base_t, ds_thread),
#endif
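    // Team structure (kmp_base_team_t) field offsets.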
    sizeof(kmp_base_team_t),
    offset_and_size_of(kmp_base_team_t, t_master_tid),
    offset_and_size_of(kmp_base_team_t, t_ident),
    offset_and_size_of(kmp_base_team_t, t_parent),
    offset_and_size_of(kmp_base_team_t, t_nproc),
    offset_and_size_of(kmp_base_team_t, t_threads),
    offset_and_size_of(kmp_base_team_t, t_serialized),
    offset_and_size_of(kmp_base_team_t, t_id),
    offset_and_size_of(kmp_base_team_t, t_pkfn),
    offset_and_size_of(kmp_base_team_t, t_task_team),
    offset_and_size_of(kmp_base_team_t, t_implicit_task_taskdata),
    offset_and_size_of(kmp_base_team_t, t_cancel_request),
    offset_and_size_of(kmp_base_team_t, t_bar),
    offset_and_size_of(kmp_balign_team_t, b_master_arrived),
    offset_and_size_of(kmp_balign_team_t, b_team_arrived),
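    // Root structure (kmp_base_root_t) field offsets.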
    sizeof(kmp_base_root_t),
    offset_and_size_of(kmp_base_root_t, r_root_team),
    offset_and_size_of(kmp_base_root_t, r_hot_team),
    offset_and_size_of(kmp_base_root_t, r_uber_thread),
    offset_and_size_not_available,
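    // Source location (ident_t) field offsets.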
    offset_and_size_of(ident_t, psource),
    offset_and_size_of(ident_t, flags),
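    // Queuing lock (kmp_base_queuing_lock_t) field offsets.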
    sizeof(kmp_base_queuing_lock_t),
    offset_and_size_of(kmp_base_queuing_lock_t, initialized),
    offset_and_size_of(kmp_base_queuing_lock_t, location),
    offset_and_size_of(kmp_base_queuing_lock_t, tail_id),
    offset_and_size_of(kmp_base_queuing_lock_t, head_id),
    offset_and_size_of(kmp_base_queuing_lock_t, next_ticket),
    offset_and_size_of(kmp_base_queuing_lock_t, now_serving),
    offset_and_size_of(kmp_base_queuing_lock_t, owner_id),
    offset_and_size_of(kmp_base_queuing_lock_t, depth_locked),
    offset_and_size_of(kmp_base_queuing_lock_t, flags),
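    // Lock table (only present when dynamic locks are disabled).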
#if !KMP_USE_DYNAMIC_LOCK
    sizeof(kmp_lock_table_t),
    offset_and_size_of(kmp_lock_table_t, used),
    offset_and_size_of(kmp_lock_table_t, allocated),
    offset_and_size_of(kmp_lock_table_t, table),
#endif
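    // Task team (kmp_base_task_team_t) field offsets.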
    sizeof(kmp_base_task_team_t),
    offset_and_size_of(kmp_base_task_team_t, tt_threads_data),
    offset_and_size_of(kmp_base_task_team_t, tt_found_tasks),
    offset_and_size_of(kmp_base_task_team_t, tt_nproc),
    offset_and_size_of(kmp_base_task_team_t, tt_unfinished_threads),
    offset_and_size_of(kmp_base_task_team_t, tt_active),
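    // Task descriptor (kmp_taskdata_t), taskgroup, and dependence node field offsets.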
    sizeof(kmp_taskdata_t),
    offset_and_size_of(kmp_taskdata_t, td_task_id),
    offset_and_size_of(kmp_taskdata_t, td_flags),
    offset_and_size_of(kmp_taskdata_t, td_team),
    offset_and_size_of(kmp_taskdata_t, td_parent),
    offset_and_size_of(kmp_taskdata_t, td_level),
    offset_and_size_of(kmp_taskdata_t, td_ident),
    offset_and_size_of(kmp_taskdata_t, td_allocated_child_tasks),
    offset_and_size_of(kmp_taskdata_t, td_incomplete_child_tasks),

    offset_and_size_of(kmp_taskdata_t, td_taskwait_ident),
    offset_and_size_of(kmp_taskdata_t, td_taskwait_counter),
    offset_and_size_of(kmp_taskdata_t, td_taskwait_thread),

    offset_and_size_of(kmp_taskdata_t, td_taskgroup),
    offset_and_size_of(kmp_taskgroup_t, count),
    offset_and_size_of(kmp_taskgroup_t, cancel_request),

    offset_and_size_of(kmp_taskdata_t, td_depnode),
    offset_and_size_of(kmp_depnode_list_t, node),
    offset_and_size_of(kmp_depnode_list_t, next),
    offset_and_size_of(kmp_base_depnode_t, successors),
    offset_and_size_of(kmp_base_depnode_t, task),
    offset_and_size_of(kmp_base_depnode_t, npredecessors),
    offset_and_size_of(kmp_base_depnode_t, nrefs),

    offset_and_size_of(kmp_task_t, routine),
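    // Per-thread task deque (kmp_thread_data_t) field offsets.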
    sizeof(kmp_thread_data_t),
    offset_and_size_of(kmp_base_thread_data_t, td_deque),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_size),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_head),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_tail),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_ntasks),
    offset_and_size_of(kmp_base_thread_data_t, td_deque_last_stolen),
}; // __kmp_omp_debug_struct_info

#undef offset_and_size_of
#undef addr_and_size_of

// Convert a 64-bit address to a pointer; the pragmas silence Intel compiler
// warnings about narrowing on 32-bit targets.
static inline void *__kmp_convert_to_ptr(kmp_uint64 addr) {
#if KMP_COMPILER_ICC
#pragma warning(push)
#pragma warning(disable : 810) // conversion from "unsigned long long" to "char
// *" may lose significant bits
#pragma warning(disable : 1195) // conversion from integer to smaller pointer
#endif // KMP_COMPILER_ICC
  return (void *)addr;
#if KMP_COMPILER_ICC
#pragma warning(pop)
#endif // KMP_COMPILER_ICC
} // __kmp_convert_to_ptr

static int kmp_location_match(kmp_str_loc_t *loc, kmp_omp_nthr_item_t *item) {
  int file_match = FALSE;
  int func_match = FALSE;
  int line_match = FALSE;

  char *file = (char *)__kmp_convert_to_ptr(item->file);
  char *func = (char *)__kmp_convert_to_ptr(item->func);
  file_match = __kmp_str_fname_match(&loc->fname, file);
  func_match = func == NULL // No function name given matches any function.
               || strcmp(func, "*") == 0 ||
               (loc->func != NULL && strcmp(loc->func, func) == 0);
  line_match = item->begin <= loc->line &&
               (item->end <= 0 || // end <= 0 means "to the end of the file".
                loc->line <= item->end);

  return (file_match && func_match && line_match);

} // kmp_location_match
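// Look up the thread count the debugger requested for the construct at
// "ident"; returns 0 if no entry in the debugger-supplied table matches.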
int __kmp_omp_num_threads(ident_t const *ident) {
  int num_threads = 0;
  kmp_omp_nthr_info_t *info = (kmp_omp_nthr_info_t *)__kmp_convert_to_ptr(
      __kmp_omp_debug_struct_info.nthr_info.addr);
  if (info->num > 0 && info->array != 0) {
    kmp_omp_nthr_item_t *items =
        (kmp_omp_nthr_item_t *)__kmp_convert_to_ptr(info->array);
    kmp_str_loc_t loc = __kmp_str_loc_init(ident->psource, 1);
    int i;
    for (i = 0; i < info->num; ++i) {
      if (kmp_location_match(&loc, &items[i])) {
        num_threads = items[i].num_threads;
      }
    }
    __kmp_str_loc_free(&loc);