/* This call must be made from the new thread.                          */
/* Caller holds the allocator lock (see GC_ASSERT below); registers     */
/* the per-thread free-list structure p with the thread-specific key    */
/* and marks every tiny free list "empty".                              */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i, j;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
        /* The key object must be word-aligned for GC_key_create.       */
        GC_ASSERT((word)&GC_thread_key % sizeof(word) == 0);
        if (0 != GC_key_create(&GC_thread_key, reset_thread_key)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 0; i < MAXOBJKINDS; ++i) {
        /* (void *)1 is the "empty free list" sentinel; the tiny        */
        /* allocator refills a list when it finds this value.           */
        for (j = 1; j < TINY_FREELISTS; ++j) {
            p -> freelists[i][j] = (void *)(word)1;
        }
        /* Set up the size 0 free lists.                                */
        /* We now handle most of them like regular free lists, to       */
        /* ensure that explicit deallocation works.  However,           */
        /* allocation of a size 0 "gcj" object is always an error.      */
        /* BUG FIX: the "else" must be emitted before #endif without    */
        /* an opening brace, so that braces balance whether or not      */
        /* GC_GCJ_SUPPORT is defined; the previous form left the outer  */
        /* for-loop unterminated when GC_GCJ_SUPPORT was defined.       */
#       ifdef GC_GCJ_SUPPORT
          if (i == GC_gcj_kind) {
            p -> freelists[i][0] = ERROR_FL;
          } else
#       endif
          /* else */ {
            p -> freelists[i][0] = (void *)(word)1;
          }
    }
#   ifdef ENABLE_DISCLAIM
      /* Mark all finalized free lists empty as well; index 0 is       */
      /* covered by the loop, so no separate assignment is needed.     */
      for (i = 0; i < TINY_FREELISTS; ++i) {
        p -> finalized_freelists[i] = (void *)(word)1;
      }
#   endif
}

/* We hold the allocator lock.                                          */
/* Return every object cached on p's per-thread free lists to the       */
/* corresponding global free lists.                                     */
GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    int i;

    /* We currently only do this from the thread itself or from */
    /* the fork handler for a child process.                    */
    for (i = 0; i < MAXOBJKINDS; ++i) {
        return_freelists(p -> freelists[i], GC_freelist[i]);
    }
#   ifdef ENABLE_DISCLAIM
      return_freelists(p -> finalized_freelists,
                       (void **)GC_finalized_objfreelist);
#   endif
}
/* We hold the allocator lock.                                          */
/* Give the thread's cached objects back to the global free lists.      */
/* Invoked only by the exiting thread itself, or by the fork handler    */
/* on behalf of a child process.                                        */
void GC_destroy_thread_local(GC_thread p)
{
#   ifndef HANDLE_FORK
      /* Sanity check: the thread-specific value should still refer     */
      /* to this thread descriptor.                                     */
      GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
#   endif
    /* Drain each per-thread cache into its matching global list.       */
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
#   endif
}
/* We hold the allocator lock.                                          */
/* Flush every per-thread free list in p back to the global lists.      */
/* Only the thread itself, or the child-process fork handler, may       */
/* call this.                                                           */
GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
    return_freelists(p -> normal_freelists, GC_objfreelist);
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, (void **)GC_gcjobjfreelist);
#   endif
#   ifdef ENABLE_DISCLAIM
      return_freelists(p -> finalized_freelists,
                       (void **)GC_finalized_objfreelist);
#   endif
}
/* We hold the allocator lock.                                          */
/* Return the thread's cached objects to the per-kind global free       */
/* lists.  Called only from the dying thread itself or from the fork    */
/* handler in a child process.                                          */
/* NOTE(review): unlike the other variants in this file, this one does  */
/* not drain finalized_freelists under ENABLE_DISCLAIM — confirm the    */
/* GC_tlfs layout of this revision has no such member.                  */
GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    int kind;

    GC_STATIC_ASSERT(THREAD_FREELISTS_KINDS <= MAXOBJKINDS);
    for (kind = 0; kind < THREAD_FREELISTS_KINDS; ++kind) {
        /* Stop at the first kind that was never created.               */
        if (kind == (int)GC_n_kinds) break;
        return_freelists(p -> _freelists[kind],
                         GC_obj_kinds[kind].ok_freelist);
    }
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, (void **)GC_gcjobjfreelist);
#   endif
}
/* We hold the allocator lock.                                          */
/* Hand all objects cached in p's free lists back to the global ones.   */
/* Callers: the exiting thread itself, or the child-side fork handler.  */
GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    int k;

    GC_STATIC_ASSERT(PREDEFINED_KINDS >= THREAD_FREELISTS_KINDS);
    for (k = 0; k < THREAD_FREELISTS_KINDS; k++) {
        return_freelists(p -> _freelists[k], GC_freelists[k]);
    }
#   ifdef GC_GCJ_SUPPORT
      return_freelists(p -> gcj_freelists, (void **)GC_gcjobjfreelist);
#   endif
#   ifdef ENABLE_DISCLAIM
      return_freelists(p -> finalized_freelists,
                       (void **)GC_finalized_objfreelist);
#   endif
}