#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1

void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );

struct shared_table __kmp_threadprivate_d_table;
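/*
 * Rough sketch, for orientation only (the authoritative definitions live in kmp.h;
 * field order and exact types may differ):
 *
 *   struct private_common -- one node per (thread, threadprivate variable) in the per-thread
 *                            hash table th_pri_common and on the th_pri_head list:
 *                            next, link, gbl_addr (serial address), par_addr (this thread's
 *                            copy), cmn_size.
 *
 *   struct shared_common  -- one process-wide descriptor per threadprivate variable in
 *                            __kmp_threadprivate_d_table: next, pod_init (byte-image template),
 *                            obj_init (copy-constructed prototype), gbl_addr, ct/cct/dt
 *                            (ctor / copy-ctor / dtor unions in scalar or vector form),
 *                            vec_len, is_vec, cmn_size.
 */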
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n", gtid, pc_addr ) );
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n", gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n", gtid, pc_addr ) );
#endif
            return tn;
        }
    }
    return 0;
}
/* Create a template for the data initialized storage: either the template is NULL (zero fill) or it is a copy of the original data. */
static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t  i;
    char   *p;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
    d->size = pc_size;
    d->more = 1;

    p = (char *) pc_addr;

    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            d->data = __kmp_allocate( pc_size );
            KMP_MEMCPY( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}
/* Initialize the data area from the template. */
static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int   i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                KMP_MEMCPY( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}
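/*
 * Example (illustrative): for a threadprivate block whose initial image is all zeros,
 * __kmp_init_common_data() leaves d->data == NULL and __kmp_copy_common_data() zero-fills
 * each new per-thread copy with memset.  If any byte of the initial image is non-zero
 * (say "int counter = 42;"), the image is snapshotted once with KMP_MEMCPY and replayed
 * into every later per-thread copy.
 */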
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q;
#ifdef KMP_DEBUG
        int gtid;
#endif
        __kmp_threadpriv_cache_list = NULL;
#ifdef KMP_DEBUG
        /* verify the uber masters were initialized */
        for (gtid = 0; gtid < __kmp_threads_capacity; gtid++)
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
            }
#endif /* KMP_DEBUG */
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}
/* Call all destructors for threadprivate data belonging to all threads. Currently unused! */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common  *d_tn;

            /* C++ destructors need to be called once per thread before exiting;
               don't call destructors for the master thread unless a copy constructor was used. */
            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common(
                                             __kmp_threads[ gtid ]->th.th_pri_common, gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                        }
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common(
                                             __kmp_threads[ gtid ]->th.th_pri_common, gtid, d_tn->gbl_addr );
                                    if (tn) {
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                    }
                                }
                            }
                        }
                        if (d_tn->obj_init != 0) {
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                        }
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}
/* Call all destructors for threadprivate data belonging to this thread. */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common  *d_tn;

    KC_TRACE( 10, ( "__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ( "__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                            gtid ) );
        }
    }
}
#ifdef KMP_TASK_COMMON_DEBUG
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] ) continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */
/* NOTE: this routine is to be called only from the serial part of the program. */
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
                      __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common  *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof( struct private_common ) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );     /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch, so no prototype exists. */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Data-initialize the prototype, since the ctor was previously registered. */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch, so no prototype exists. */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Data-initialize the prototype, since the ctor was previously registered. */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );

    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC
                        " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
#endif

    /* Link the node into the simple per-thread list. */
    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /* If this is a C++ object with a copy constructor, use it;
       else if it is a C++ object with a constructor, use it for the non-master copies only;
       else use pod_init and copy the byte image.
       Constructors run once for each non-master thread; copy constructors run once per thread. */

    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }

    return tn;
}
/* Register constructors and destructors for thread private data. */
void
__kmpc_threadprivate_register( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen */
    KMP_ASSERT( cctor == 0 );
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctor   = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor   = dtor;

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
void *
__kmpc_threadprivate( ident_t *loc, kmp_int32 global_tid, void *data, size_t size )
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* The parallel address will NEVER overlap with the data_address;
           use data_address = data. */
        KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ( "__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                        global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ( "__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                    global_tid, ret ) );

    return ret;
}
/* Allocate private storage for threadprivate data. */
void *
__kmpc_threadprivate_cached( ident_t *loc, kmp_int32 global_tid, void *data, size_t size, void ***cache )
{
    void *ret;

    KC_TRACE( 10, ( "__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                    KMP_SIZE_T_SPEC "\n",
                    global_tid, *cache, data, size ) );

    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            void **my_cache;
            kmp_cached_addr_t *tp_cache_addr;

            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

            /* One slot per possible gtid, followed by a kmp_cached_addr_t bookkeeping node. */
            my_cache = (void **)
                __kmp_allocate( sizeof( void * ) * __kmp_tp_capacity + sizeof( kmp_cached_addr_t ) );

            KC_TRACE( 50, ( "__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                            global_tid, my_cache ) );

            /* Add the address of my_cache to the linked list for later cleanup. */
            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();

            TCW_PTR( *cache, my_cache );

            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size );

        TCW_PTR( (*cache)[ global_tid ], ret );
    }
    KC_TRACE( 10, ( "__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                    global_tid, ret ) );

    return ret;
}
/* Register vector constructors and destructors for thread private data. */
void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current code gen */
    KMP_ASSERT( cctor == 0 );
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );        /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;

        d_tn->ct.ctorv   = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv   = dtor;
        d_tn->is_vec  = TRUE;
        d_tn->vec_len = (size_t) vector_length;

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
/* Threadprivate constructor/destructor function types and entry points (declared in kmp.h): */

typedef void *(* kmpc_ctor)      (void *);
typedef void  (* kmpc_dtor)      (void *);
typedef void *(* kmpc_cctor)     (void *, void *);
typedef void *(* kmpc_ctor_vec)  (void *, size_t);
typedef void  (* kmpc_dtor_vec)  (void *, size_t);
typedef void *(* kmpc_cctor_vec) (void *, void *, size_t);

void   __kmpc_threadprivate_register( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor );
void   __kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor, kmpc_cctor_vec cctor,
                                          kmpc_dtor_vec dtor, size_t vector_length );
void * __kmpc_threadprivate_cached( ident_t *loc, kmp_int32 global_tid, void *data, size_t size, void ***cache );
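Taken together, these entry points are what a compiler emits for an OpenMP threadprivate
directive. The sketch below is illustrative only: the names counter, counter_cache,
counter_ctor, counter_dtor, loc and use_counter are invented for this example; only the
__kmpc_* calls and __kmpc_global_thread_num are real runtime entry points.

/* Hypothetical lowering of:
       int counter = 42;
       #pragma omp threadprivate(counter)
   Everything except the __kmpc_* entry points is invented for illustration. */

static int     counter = 42;            /* master (serial) copy at its normal address   */
static void  **counter_cache = NULL;    /* per-variable cache, indexed by gtid          */
static ident_t loc = { 0 };             /* source-location descriptor (contents elided) */

static void *counter_ctor( void *addr ) { *(int *) addr = 42; return addr; }
static void  counter_dtor( void *addr ) { (void) addr; /* nothing to destroy for an int */ }

static void use_counter( void )
{
    int *my_counter;

    /* Normally emitted once per variable, before its first parallel use.
       cctor is passed as NULL: USE_CHECKS_COMMON asserts cctor == 0. */
    __kmpc_threadprivate_register( &loc, &counter, counter_ctor, NULL, counter_dtor );

    /* Emitted at each reference inside a parallel region: returns this thread's copy. */
    my_counter = (int *) __kmpc_threadprivate_cached(
        &loc, __kmpc_global_thread_num( &loc ), &counter, sizeof( counter ), &counter_cache );

    *my_counter += 1;
}

On the initial thread the returned pointer is simply &counter (par_addr == gbl_addr in
kmp_threadprivate_insert); worker threads get a freshly allocated copy, constructed or
byte-copied from the template as shown above.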