GC_API GC_ATTR_MALLOC char * GC_CALL GC_debug_strdup(const char *str,
                                                     GC_EXTRA_PARAMS)
{
  char *copy;
  size_t lb;

  if (str == NULL) {
    if (GC_find_leak)
      GC_err_printf("strdup(NULL) behavior is undefined\n");
    return NULL;
  }

  lb = strlen(str) + 1;
  copy = (char *)GC_debug_malloc_atomic(lb, OPT_RA s, i);
  if (copy == NULL) {
#   ifndef MSWINCE
      errno = ENOMEM;
#   endif
    return NULL;
  }
  BCOPY(str, copy, lb);
  return copy;
}
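/* Illustrative client-side sketch, not part of the collector sources:   */
/* with GC_DEBUG defined before including gc.h, the GC_STRDUP macro      */
/* expands to GC_debug_strdup above and records the caller's file/line   */
/* (GC_EXTRAS) in the debug header used by leak and smash reports.       */
#define GC_DEBUG
#include <gc.h>
#include <stdio.h>

int main(void)
{
    char *s;

    GC_INIT();
    s = GC_STRDUP("hello");     /* collected, pointer-free copy with a debug header */
    printf("%s\n", s);
    return 0;
}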
/* Set ofn and ocd to reflect the values we got back. */
static void store_old(void *obj, GC_finalization_proc my_old_fn,
                      struct closure *my_old_cd,
                      GC_finalization_proc *ofn, void **ocd)
{
    if (0 != my_old_fn) {
      if (my_old_fn == OFN_UNSET) {
        /* register_finalizer() failed; (*ofn) and (*ocd) are unchanged. */
        return;
      }
      if (my_old_fn != GC_debug_invoke_finalizer) {
        GC_err_printf("Debuggable object at %p had a non-debug finalizer\n",
                      obj);
        /* This should probably be fatal. */
      } else {
        if (ofn) *ofn = my_old_cd -> cl_fn;
        if (ocd) *ocd = my_old_cd -> cl_data;
      }
    } else {
      if (ofn) *ofn = 0;
      if (ocd) *ocd = 0;
    }
}
/* Print a type description for the object whose client-visible address */
/* is p.                                                                 */
STATIC void GC_print_type(ptr_t p)
{
    hdr * hhdr = GC_find_header(p);
    char buffer[GC_TYPE_DESCR_LEN + 1];
    int kind = hhdr -> hb_obj_kind;

    if (0 != GC_describe_type_fns[kind] && GC_is_marked(GC_base(p))) {
        /* This should preclude free list objects except with   */
        /* thread-local allocation.                              */
        buffer[GC_TYPE_DESCR_LEN] = 0;
        (GC_describe_type_fns[kind])(p, buffer);
        GC_ASSERT(buffer[GC_TYPE_DESCR_LEN] == 0);
        GC_err_puts(buffer);
    } else {
        switch(kind) {
          case PTRFREE:
            GC_err_puts("PTRFREE");
            break;
          case NORMAL:
            GC_err_puts("NORMAL");
            break;
          case UNCOLLECTABLE:
            GC_err_puts("UNCOLLECTABLE");
            break;
#         ifdef ATOMIC_UNCOLLECTABLE
            case AUNCOLLECTABLE:
              GC_err_puts("ATOMIC UNCOLLECTABLE");
              break;
#         endif
          case STUBBORN:
            GC_err_puts("STUBBORN");
            break;
          default:
            GC_err_printf("kind=%d descr=0x%lx", kind,
                          (unsigned long)(hhdr -> hb_descr));
        }
    }
}
void GC_debug_register_finalizer_ignore_self(void * obj,
                                             GC_finalization_proc fn,
                                             void * cd,
                                             GC_finalization_proc *ofn,
                                             void * *ocd)
{
    GC_finalization_proc my_old_fn;
    void * my_old_cd;
    ptr_t base = GC_base(obj);

    if (0 == base) return;
    if ((ptr_t)obj - base != sizeof(oh)) {
        GC_err_printf(
            "GC_debug_register_finalizer_ignore_self called with "
            "non-base-pointer %p\n", obj);
    }
    if (0 == fn) {
      GC_register_finalizer_ignore_self(base, 0, 0, &my_old_fn, &my_old_cd);
    } else {
      GC_register_finalizer_ignore_self(base, GC_debug_invoke_finalizer,
                                        GC_make_closure(fn, cd),
                                        &my_old_fn, &my_old_cd);
    }
    store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
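/* A hedged usage sketch, not from the collector sources: with GC_DEBUG   */
/* defined, the GC_REGISTER_FINALIZER_IGNORE_SELF macro resolves to the   */
/* debug entry point above ("ignore self" means pointers from the object  */
/* to itself do not keep it from being finalized).                        */
#define GC_DEBUG
#include <gc.h>
#include <stdio.h>

static void GC_CALLBACK note_collected(void *obj, void *client_data)
{
    /* Invoked by the collector once obj has become unreachable. */
    printf("finalized %p (%s)\n", obj, (const char *)client_data);
}

int main(void)
{
    void *obj;

    GC_INIT();
    obj = GC_MALLOC(64);
    GC_REGISTER_FINALIZER_IGNORE_SELF(obj, note_collected,
                                      (void *)"demo", NULL, NULL);
    obj = NULL;        /* drop the last reference */
    GC_gcollect();     /* finalizer typically runs here or at a later collection */
    return 0;
}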
GC_API void * GC_CALL GC_malloc(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;

      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_core_malloc(bytes);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_core_malloc(bytes);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
#   ifdef LOG_ALLOCS
      GC_err_printf("GC_malloc(%lu) = %p, GC: %lu\n",
                    (unsigned long)bytes, result, (unsigned long)GC_gc_no);
#   endif
    return result;
}
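/* A minimal sketch, not from the collector sources, of client code that  */
/* exercises the thread-local fast path above.  It assumes a collector    */
/* built with pthreads support; defining GC_THREADS before including      */
/* gc.h makes the header redirect pthread_create to the GC's wrapper so   */
/* the new thread gets registered (and thus its own free lists).          */
#define GC_THREADS
#include <gc.h>
#include <pthread.h>
#include <stdio.h>

static void *worker(void *arg)
{
    int i;

    /* These allocations normally come from the per-thread free lists. */
    for (i = 0; i < 1000; ++i)
        (void)GC_MALLOC(24);
    return arg;
}

int main(void)
{
    pthread_t t;

    GC_INIT();
    if (pthread_create(&t, NULL, worker, NULL) != 0)
        return 1;
    pthread_join(t, NULL);
    printf("heap size: %lu\n", (unsigned long)GC_get_heap_size());
    return 0;
}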
GC_API void * GC_CALL GC_debug_malloc(size_t lb, GC_EXTRA_PARAMS)
{
    void * result;

    /* Note that according to malloc() specification, if size is 0 then */
    /* malloc() returns either NULL, or a unique pointer value that can */
    /* later be successfully passed to free().  We always do the latter. */
    result = GC_malloc(lb + DEBUG_BYTES);
#   ifdef GC_ADD_CALLER
      if (s == NULL) {
        GC_caller_func_offset(ra, &s, &i);
      }
#   endif
    if (result == 0) {
        GC_err_printf("GC_debug_malloc(%lu) returning NULL (%s:%d)\n",
                      (unsigned long)lb, s, i);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging();
    }
    ADD_CALL_CHAIN(result, ra);
    return (GC_store_debug_info(result, (word)lb, s, i));
}
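/* Illustrative sketch, not from the collector sources: the usual route   */
/* into GC_debug_malloc is the GC_MALLOC macro with GC_DEBUG defined,     */
/* which supplies GC_EXTRAS (the caller's __FILE__/__LINE__) for the      */
/* GC_EXTRA_PARAMS arguments seen above.                                  */
#define GC_DEBUG
#include <gc.h>

int main(void)
{
    void *a, *b;

    GC_INIT();
    a = GC_MALLOC(128);                  /* expands to GC_debug_malloc(128, GC_EXTRAS) */
    b = GC_debug_malloc(128, GC_EXTRAS); /* equivalent explicit call */
    return (a != NULL && b != NULL) ? 0 : 1;
}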
STATIC void GC_CALLBACK GC_default_same_obj_print_proc(void * p, void * q)
{
    GC_err_printf("%p and %p are not in the same object\n", p, q);
    ABORT("GC_same_obj test failed");
}
STATIC void GC_CALLBACK GC_default_is_visible_print_proc(void * p)
{
    GC_err_printf("%p is not a GC visible pointer location\n", p);
    ABORT("GC_is_visible test failed");
}
STATIC void GC_CALLBACK GC_default_is_valid_displacement_print_proc(void *p)
{
    GC_err_printf("%p does not point to valid object displacement\n", p);
    ABORT("GC_is_valid_displacement test failed");
}
GC_INNER void GC_default_print_heap_obj_proc(ptr_t p)
{
    ptr_t base = GC_base(p);

    GC_err_printf("start: %p, appr. length: %lu", base,
                  (unsigned long)GC_size(base));
}
/* Both p and q are pointers to the object base, i.e. pointers to an oh. */
static void add_edge(ptr_t p, ptr_t q)
{
    ptr_t pred = GET_OH_BG_PTR(q);
    back_edges * be, *be_cont;
    word i;

    GC_ASSERT(p == GC_base(p) && q == GC_base(q));
    if (!GC_HAS_DEBUG_INFO(q) || !GC_HAS_DEBUG_INFO(p)) {
      /* This is really a misinterpreted free list link, since we saw   */
      /* a pointer to a free list.  Don't overwrite it!                 */
      return;
    }
    if (NULL == pred) {
      static unsigned random_number = 13;
#     define GOT_LUCKY_NUMBER (((++random_number) & 0x7f) == 0)
      /* A not very random number we use to occasionally allocate a     */
      /* back_edges structure even for a single backward edge.  This    */
      /* prevents us from repeatedly tracing back through very long     */
      /* chains, since we will have some place to store height and      */
      /* in_progress flags along the way.                               */

      SET_OH_BG_PTR(q, p);
      if (GOT_LUCKY_NUMBER) ensure_struct(q);
      return;
    }

    /* Check whether it was already in the list of predecessors. */
    {
      back_edges *e = (back_edges *)((word)pred & ~FLAG_MANY);
      word n_edges;
      word total;
      int local = 0;

      if (((word)pred & FLAG_MANY) != 0) {
        n_edges = e -> n_edges;
      } else if (pred != NULL && ((word)pred & 1) == 0) {
        /* A misinterpreted freelist link. */
        n_edges = 1;
        local = -1;
      } else {
        n_edges = 0;
      }
      for (total = 0; total < n_edges; ++total) {
        if (local == MAX_IN) {
          e = e -> cont;
          local = 0;
        }
        if (local >= 0)
          pred = e -> edges[local++];
        if (pred == p)
          return;
      }
    }

    ensure_struct(q);
    be = (back_edges *)((word)GET_OH_BG_PTR(q) & ~FLAG_MANY);
    for (i = be -> n_edges, be_cont = be; i > MAX_IN; i -= MAX_IN)
      be_cont = be_cont -> cont;
    if (i == MAX_IN) {
      be_cont -> cont = new_back_edges();
      be_cont = be_cont -> cont;
      i = 0;
    }
    be_cont -> edges[i] = p;
    be -> n_edges++;
#   ifdef DEBUG_PRINT_BIG_N_EDGES
      if (GC_print_stats == VERBOSE && be -> n_edges == 100) {
        GC_err_printf("The following object has big in-degree:\n");
        GC_print_heap_obj(q);
      }
#   endif
}
GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
{
    void * base;
    void * result;
    hdr * hhdr;

    if (p == 0) {
      return GC_debug_malloc(lb, OPT_RA s, i);
    }
    if (0 == lb) /* and p != NULL */ {
      GC_debug_free(p);
      return NULL;
    }
#   ifdef GC_ADD_CALLER
      if (s == NULL) {
        GC_caller_func_offset(ra, &s, &i);
      }
#   endif
    base = GC_base(p);
    if (base == 0) {
        ABORT_ARG1("Invalid pointer passed to realloc()", ": %p", p);
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf(
              "GC_debug_realloc called on pointer %p w/o debugging info\n", p);
        return(GC_realloc(p, lb));
    }
    hhdr = HDR(base);
    switch (hhdr -> hb_obj_kind) {
      case NORMAL:
        result = GC_debug_malloc(lb, OPT_RA s, i);
        break;
      case PTRFREE:
        result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
        break;
      case UNCOLLECTABLE:
        result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
        break;
#     ifdef GC_ATOMIC_UNCOLLECTABLE
        case AUNCOLLECTABLE:
          result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
          break;
#     endif
      default:
        result = NULL; /* initialized to prevent warning. */
        ABORT_RET("GC_debug_realloc: encountered bad kind");
    }

    if (result != NULL) {
      size_t old_sz;
#     ifdef SHORT_DBG_HDRS
        old_sz = GC_size(base) - sizeof(oh);
#     else
        old_sz = ((oh *)base) -> oh_sz;
#     endif
      if (old_sz > 0)
        BCOPY(p, result, old_sz < lb ? old_sz : lb);
      GC_debug_free(p);
    }
    return(result);
}
GC_API void GC_CALL GC_debug_free(void * p)
{
    ptr_t base;

    if (0 == p) return;

    base = (ptr_t)GC_base(p);
    if (NULL == base) {
#     if defined(REDIRECT_MALLOC) \
         && ((defined(NEED_CALLINFO) && defined(GC_HAVE_BUILTIN_BACKTRACE)) \
             || defined(GC_LINUX_THREADS) || defined(GC_SOLARIS_THREADS) \
             || defined(MSWIN32))
        /* In some cases, we should ignore objects that do not belong   */
        /* to the GC heap.  See the comment in GC_free.                 */
        if (!GC_is_heap_ptr(p)) return;
#     endif
      ABORT_ARG1("Invalid pointer passed to free()", ": %p", p);
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
#     if defined(REDIRECT_FREE) && defined(USE_PROC_FOR_LIBRARIES)
        /* TODO: Suppress the warning if free() caller is in libpthread */
        /* or libdl.                                                    */
#     endif
      GC_err_printf(
               "GC_debug_free called on pointer %p w/o debugging info\n", p);
    } else {
#     ifndef SHORT_DBG_HDRS
        ptr_t clobbered = GC_check_annotated_obj((oh *)base);
        word sz = GC_size(base);

        if (clobbered != 0) {
          GC_have_errors = TRUE;
          if (((oh *)base) -> oh_sz == sz) {
            GC_print_smashed_obj(
                  "GC_debug_free: found previously deallocated (?) object at",
                  p, clobbered);
            return; /* ignore double free */
          } else {
            GC_print_smashed_obj("GC_debug_free: found smashed location at",
                                 p, clobbered);
          }
        }
        /* Invalidate size (mark the object as deallocated) */
        ((oh *)base) -> oh_sz = sz;
#     endif /* SHORT_DBG_HDRS */
    }
    if (GC_find_leak
#       ifndef SHORT_DBG_HDRS
          && ((ptr_t)p - (ptr_t)base != sizeof(oh) || !GC_findleak_delay_free)
#       endif
        ) {
      GC_free(base);
    } else {
      hdr * hhdr = HDR(p);

      if (hhdr -> hb_obj_kind == UNCOLLECTABLE
#         ifdef GC_ATOMIC_UNCOLLECTABLE
            || hhdr -> hb_obj_kind == AUNCOLLECTABLE
#         endif
          ) {
        GC_free(base);
      } else {
        word i;
        word sz = hhdr -> hb_sz;
        word obj_sz = BYTES_TO_WORDS(sz - sizeof(oh));

        for (i = 0; i < obj_sz; ++i)
          ((word *)p)[i] = GC_FREED_MEM_MARKER;
        GC_ASSERT((word *)p + i == (word *)(base + sz));
        /* Update the counter even though the real deallocation */
        /* is deferred.                                          */
        LOCK();
        GC_bytes_freed += sz;
        UNLOCK();
      }
    } /* !GC_find_leak */
}
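/* A hedged usage sketch, not from the collector sources: in              */
/* leak-detection mode the path above hands the object straight back to   */
/* GC_free, and objects that are never freed are reported when a          */
/* collection runs.  GC_set_find_leak is typically called before GC_INIT. */
#define GC_DEBUG
#include <gc.h>

int main(void)
{
    void *kept, *freed;

    GC_set_find_leak(1);     /* treat unreachable-but-unfreed objects as leaks */
    GC_INIT();
    kept  = GC_MALLOC(64);   /* never freed: expected to show up in the leak report */
    freed = GC_MALLOC(64);
    GC_FREE(freed);          /* goes through GC_debug_free above */
    kept = NULL;
    GC_gcollect();           /* leak reports go to stderr */
    return 0;
}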
/* p points to somewhere inside an object with the debugging info. */
STATIC void GC_print_obj(ptr_t p)
{
    oh * ohdr = (oh *)GC_base(p);
    ptr_t q;
    hdr * hhdr;
    int kind;
    const char *kind_str;
    char buffer[GC_TYPE_DESCR_LEN + 1];

    GC_ASSERT(I_DONT_HOLD_LOCK());
#   ifdef LINT2
      if (!ohdr) ABORT("Invalid GC_print_obj argument");
#   endif

    q = (ptr_t)(ohdr + 1);
    /* Print a type description for the object whose client-visible    */
    /* address is q.                                                    */
    hhdr = GC_find_header(q);
    kind = hhdr -> hb_obj_kind;
    if (0 != GC_describe_type_fns[kind] && GC_is_marked(ohdr)) {
        /* This should preclude free list objects except with   */
        /* thread-local allocation.                              */
        buffer[GC_TYPE_DESCR_LEN] = 0;
        (GC_describe_type_fns[kind])(q, buffer);
        GC_ASSERT(buffer[GC_TYPE_DESCR_LEN] == 0);
        kind_str = buffer;
    } else {
        switch(kind) {
          case PTRFREE:
            kind_str = "PTRFREE";
            break;
          case NORMAL:
            kind_str = "NORMAL";
            break;
          case UNCOLLECTABLE:
            kind_str = "UNCOLLECTABLE";
            break;
#         ifdef GC_ATOMIC_UNCOLLECTABLE
            case AUNCOLLECTABLE:
              kind_str = "ATOMIC_UNCOLLECTABLE";
              break;
#         endif
          default:
            kind_str = NULL;
            /* The alternative is to use snprintf(buffer) but it is    */
            /* not quite portable (see vsnprintf in misc.c).           */
        }
    }

    if (NULL != kind_str) {
      GC_err_printf("%p (%s:%d," IF_NOT_SHORTDBG_HDRS(" sz=%lu,") " %s)\n",
                    (void *)((ptr_t)ohdr + sizeof(oh)),
                    ohdr->oh_string, GET_OH_LINENUM(ohdr) /*, */
                    COMMA_IFNOT_SHORTDBG_HDRS((unsigned long)ohdr->oh_sz),
                    kind_str);
    } else {
      GC_err_printf("%p (%s:%d," IF_NOT_SHORTDBG_HDRS(" sz=%lu,")
                    " kind=%d descr=0x%lx)\n",
                    (void *)((ptr_t)ohdr + sizeof(oh)),
                    ohdr->oh_string, GET_OH_LINENUM(ohdr) /*, */
                    COMMA_IFNOT_SHORTDBG_HDRS((unsigned long)ohdr->oh_sz),
                    kind, (unsigned long)hhdr->hb_descr);
    }
    PRINT_CALL_CHAIN(ohdr);
}
/*
 * This may be called from DllMain, and hence operates under unusual
 * constraints.  In particular, it must be lock-free if GC_win32_dll_threads
 * is set.  Always called from the thread being added.
 * If GC_win32_dll_threads is not set, we already hold the allocation lock,
 * except possibly during single-threaded start-up code.
 */
static GC_thread GC_register_my_thread_inner(struct GC_stack_base *sb,
                                             DWORD thread_id)
{
  GC_vthread me;

  /* The following should be a no-op according to the win32     */
  /* documentation.  There is empirical evidence that it        */
  /* isn't.             - HB                                    */
# if defined(MPROTECT_VDB)
#   if defined(GWW_VDB)
      if (GC_incremental && !GC_gww_dirty_init())
        SetUnhandledExceptionFilter(GC_write_fault_handler);
#   else
      if (GC_incremental)
        SetUnhandledExceptionFilter(GC_write_fault_handler);
#   endif
# endif

  if (GC_win32_dll_threads) {
    int i;
    /* It appears to be unsafe to acquire a lock here, since this       */
    /* code is apparently not preemptible on some systems.              */
    /* (This is based on complaints, not on Microsoft's official        */
    /* documentation, which says this should perform "only simple       */
    /* initialization tasks".)                                          */
    /* Hence we make do with nonblocking synchronization.               */
    /* It has been claimed that DllMain is really only executed with    */
    /* a particular system lock held, and thus careful use of locking   */
    /* around code that doesn't call back into the system libraries     */
    /* might be OK.  But this hasn't been tested across all win32       */
    /* variants.                                                        */

    /* cast away volatile qualifier */
    for (i = 0;
         InterlockedExchange((IE_t)&dll_thread_table[i].in_use, 1) != 0;
         i++) {
      /* Compare-and-swap would make this cleaner, but that's not       */
      /* supported before Windows 98 and NT 4.0.  In Windows 2000,      */
      /* InterlockedExchange is supposed to be replaced by              */
      /* InterlockedExchangePointer, but that's not really what I       */
      /* want here.                                                     */
      /* FIXME: We should eventually declare Win95 dead and use AO_     */
      /* primitives here.                                               */
      if (i == MAX_THREADS - 1)
        ABORT("too many threads");
    }
    /* Update GC_max_thread_index if necessary.  The following is safe, */
    /* and unlike CompareExchange-based solutions seems to work on all  */
    /* Windows95 and later platforms.                                   */
    /* Unfortunately, GC_max_thread_index may be temporarily out of     */
    /* bounds, so readers have to compensate.                           */
    while (i > GC_max_thread_index) {
      InterlockedIncrement((IE_t)&GC_max_thread_index);
    }
    if (GC_max_thread_index >= MAX_THREADS) {
      /* We overshot due to simultaneous increments.  */
      /* Setting it to MAX_THREADS-1 is always safe.  */
      GC_max_thread_index = MAX_THREADS - 1;
    }
    me = dll_thread_table + i;
  } else /* Not using DllMain */ {
    GC_ASSERT(I_HOLD_LOCK());
    GC_in_thread_creation = TRUE; /* OK to collect from unknown thread. */
    me = GC_new_thread(thread_id);
    GC_in_thread_creation = FALSE;
  }
# ifdef GC_PTHREADS
    /* me can be NULL -> segfault */
    me -> pthread_id = pthread_self();
# endif

  if (!DuplicateHandle(GetCurrentProcess(), GetCurrentThread(),
                       GetCurrentProcess(), (HANDLE*)&(me -> handle),
                       0, 0, DUPLICATE_SAME_ACCESS)) {
    DWORD last_error = GetLastError();

    GC_err_printf("Last error code: %lu\n", (unsigned long)last_error);
    ABORT("DuplicateHandle failed");
  }
  me -> stack_base = sb -> mem_base;
  /* Up until this point, GC_push_all_stacks considers this thread      */
  /* invalid.                                                           */
  /* Up until this point, this entry is viewed as reserved but invalid  */
  /* by GC_delete_thread.                                               */
  me -> id = thread_id;
# if defined(THREAD_LOCAL_ALLOC)
    GC_init_thread_local((GC_tlfs)(&(me->tlfs)));
# endif
  if (me -> stack_base == NULL)
    ABORT("Bad stack base in GC_register_my_thread_inner");

  if (GC_win32_dll_threads) {
    if (GC_please_stop) {
      AO_store(&GC_attached_thread, TRUE);
      AO_nop_full(); /* Later updates must become visible after this. */
    }
    /* We'd like to wait here, but can't, since waiting in DllMain      */
    /* provokes deadlocks.                                              */
    /* Thus we force marking to be restarted instead.                   */
  } else {
    GC_ASSERT(!GC_please_stop);
    /* Otherwise both we and the thread stopping code would be          */
    /* holding the allocation lock.                                     */
  }
  return (GC_thread)(me);
}