Example No. 1
  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo = GC_finalize_now;
    unsigned long ready = 0;

#   ifndef GC_LONG_REFS_NOT_NEEDED
    GC_log_printf(
        "%lu finalization table entries; "
        "%lu short/%lu long disappearing links alive\n",
        (unsigned long)GC_fo_entries,
        (unsigned long)GC_dl_hashtbl.entries,
        (unsigned long)GC_ll_hashtbl.entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_log_printf("%lu objects are ready for finalization; "
                  "%ld short/%ld long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)GC_old_ll_entries - (long)GC_ll_hashtbl.entries);
#   else /* GC_LONG_REFS_NOT_NEEDED */
    GC_log_printf(
        "%lu finalization table entries; "
        "%lu disappearing links alive\n",
        (unsigned long)GC_fo_entries,
        (unsigned long)GC_dl_hashtbl.entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_log_printf("%lu objects are ready for finalization; "
                  "%ld links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries);
#   endif /* !GC_LONG_REFS_NOT_NEEDED */
  }
Example No. 2
  /* A size of 0 granules is used for large objects.                    */
  GC_INNER GC_bool GC_add_map_entry(size_t granules)
  {
    unsigned displ;
    short * new_map;

    if (granules > BYTES_TO_GRANULES(MAXOBJBYTES)) granules = 0;
    if (GC_obj_map[granules] != 0) {
        return(TRUE);
    }
    new_map = (short *)GC_scratch_alloc(MAP_LEN * sizeof(short));
    if (new_map == 0) return(FALSE);
    if (GC_print_stats)
        GC_log_printf("Adding block map for size of %u granules (%u bytes)\n",
                  (unsigned)granules, (unsigned)(GRANULES_TO_BYTES(granules)));
    if (granules == 0) {
      for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
        new_map[displ] = 1;  /* Nonzero to get us out of marker fast path. */
      }
    } else {
      for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
        new_map[displ] = (short)(displ % granules);
      }
    }
    GC_obj_map[granules] = new_map;
    return(TRUE);
  }
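Note: the map built above lets the marker turn an arbitrary interior displacement into the offset of the enclosing object with a single table lookup: for a size class of n granules, map[displ] == displ % n, so the object begins at displ - map[displ]. A standalone sketch of the idea, with toy values independent of the collector's headers:

  #include <stdio.h>

  int main(void)
  {
      short map[8];
      unsigned granules = 2;   /* hypothetical 2-granule size class */
      unsigned displ;

      /* Same fill rule as GC_add_map_entry's non-large case. */
      for (displ = 0; displ < 8; displ++)
          map[displ] = (short)(displ % granules);
      for (displ = 0; displ < 8; displ++)
          printf("displacement %u -> object begins at granule %u\n",
                 displ, displ - map[displ]);
      return 0;
  }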
Example No. 3
int GC_general_register_disappearing_link(void * * link, void * obj)
{
    struct disappearing_link *curr_dl;
    size_t index;
    struct disappearing_link * new_dl;
    DCL_LOCK_STATE;
    
    if ((word)link & (ALIGNMENT-1))
        ABORT("Bad arg to GC_general_register_disappearing_link");
#   ifdef THREADS
        LOCK();
#   endif
    if (log_dl_table_size == -1
        || GC_dl_entries > ((word)1 << log_dl_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)(&dl_head),
                      &log_dl_table_size);
        if (GC_print_stats) {
            GC_log_printf("Grew dl table to %u entries\n",
                          (1 << log_dl_table_size));
        }
    }
    index = HASH2(link, log_dl_table_size);
    for (curr_dl = dl_head[index]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        if (curr_dl -> dl_hidden_link == HIDE_POINTER(link)) {
            curr_dl -> dl_hidden_obj = HIDE_POINTER(obj);
#           ifdef THREADS
                UNLOCK();
#           endif
            return(1);
        }
    }
    new_dl = (struct disappearing_link *)
        GC_INTERNAL_MALLOC(sizeof(struct disappearing_link), NORMAL);
    if (0 == new_dl) {
#     ifdef THREADS
        UNLOCK();
#     endif
      new_dl = (struct disappearing_link *)
          GC_oom_fn(sizeof(struct disappearing_link));
      if (0 == new_dl) {
        GC_finalization_failures++;
        return(2);
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
        LOCK();
#     endif
    }
    new_dl -> dl_hidden_obj = HIDE_POINTER(obj);
    new_dl -> dl_hidden_link = HIDE_POINTER(link);
    dl_set_next(new_dl, dl_head[index]);
    dl_head[index] = new_dl;
    GC_dl_entries++;
#   ifdef THREADS
        UNLOCK();
#   endif
    return(0);
}
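Per the code above, the routine returns 0 on a fresh registration, 1 when the link was already present (only its object pointer is updated), and 2 on allocation failure. It backs the public GC_general_register_disappearing_link() API; a minimal usage sketch follows (assuming bdwgc's gc.h; because the collector scans stacks conservatively, a stale copy of the pointer may keep the object alive, so the cleared/uncleared outcome of a demo like this is not guaranteed):

  #include <gc.h>
  #include <stdio.h>

  int main(void)
  {
      void *obj;
      void **link;

      GC_INIT();
      obj = GC_MALLOC(64);
      /* Keep the link cell in pointer-free (atomic) memory so that the */
      /* pointer stored in it does not itself keep obj reachable.       */
      link = (void **)GC_MALLOC_ATOMIC(sizeof(void *));
      *link = obj;
      GC_general_register_disappearing_link(link, obj);
      obj = NULL;                    /* drop the strong reference */
      GC_gcollect();
      printf("*link %s\n", *link == NULL ? "was cleared" : "still set");
      return 0;
  }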
Example No. 4
/* Explicitly deallocate an object p.                           */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* In bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
    /* Required by ANSI.  It's not my fault ...     */
#   ifdef LOG_ALLOCS
    GC_log_printf("GC_free(%p) after GC #%lu\n",
                  p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
    /* For Solaris, we have to redirect malloc calls during         */
    /* initialization.  For the others, this seems to happen        */
    /* implicitly.                                                  */
    /* Don't try to deallocate that memory.                         */
    if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the      */
        /* object is reallocated, it doesn't matter.  Otherwise */
        /* the collector will do it, since it's on a free list. */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
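Explicit deallocation is never required with this collector, but GC_free() recycles an object immediately: small objects (at most MAXOBJGRANULES granules) go back onto the per-kind free list, while larger ones return whole blocks via GC_freehblk(). A minimal round-trip sketch (assuming gc.h):

  #include <gc.h>

  int main(void)
  {
      void *p;

      GC_INIT();
      p = GC_MALLOC(128);       /* small object: returned to a free list */
      GC_FREE(p);               /* must be the base pointer (GC_base(p) == p) */
      p = GC_MALLOC(1 << 20);   /* large object: whole blocks are released */
      GC_FREE(p);
      return 0;
  }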
Example No. 5
void GC_push_all_stacks(void)
{
  DWORD me = GetCurrentThreadId();
  GC_bool found_me = FALSE;
  size_t nthreads = 0;
  
  if (GC_win32_dll_threads) {
    int i;
    LONG my_max = GC_get_max_thread_index();

    for (i = 0; i <= my_max; i++) {
      GC_thread t = (GC_thread)(dll_thread_table + i);
      if (t -> in_use) {
        ++nthreads;
        GC_push_stack_for(t);
        if (t -> id == me) found_me = TRUE;
      }
    }
  } else {
    GC_thread t;
    int i;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (t = GC_threads[i]; t != 0; t = t -> next) {
        ++nthreads;
        if (!KNOWN_FINISHED(t)) GC_push_stack_for(t);
        if (t -> id == me) found_me = TRUE;
      }
    }
  }
  if (GC_print_stats == VERBOSE) {
    GC_log_printf("Pushed %u thread stacks ", (unsigned)nthreads);
    if (GC_win32_dll_threads) {
      GC_log_printf("based on DllMain thread tracking\n");
    } else {
      GC_log_printf("\n");
    }
  }
  if (!found_me && !GC_in_thread_creation)
    ABORT("Collecting from unknown thread.");
}
Example No. 6
  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));

    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
      ++ready;
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
  }
Example No. 7
/* Internal use only; lock held.        */
STATIC void GC_remove_root_at_pos(int i)
{
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Remove data root section %d: %p .. %p\n",
                    i, (void *)GC_static_roots[i].r_start,
                    (void *)GC_static_roots[i].r_end);
#   endif
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
Example No. 8
/* Internal use only; lock held.        */
STATIC void GC_remove_root_at_pos(int i)
{
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Remove data root section at %d: %p .. %p%s\n",
                    i, (void *)GC_static_roots[i].r_start,
                    (void *)GC_static_roots[i].r_end,
                    GC_static_roots[i].r_tmp ? " (temporary)" : "");
#   endif
    GC_root_size -= (GC_static_roots[i].r_end - GC_static_roots[i].r_start);
    GC_static_roots[i].r_start = GC_static_roots[n_root_sets-1].r_start;
    GC_static_roots[i].r_end = GC_static_roots[n_root_sets-1].r_end;
    GC_static_roots[i].r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
    n_root_sets--;
}
Example No. 9
/* Should be called immediately after GC_read_dirty and GC_read_changed. */
void GC_check_dirty(void)
{
    int index;
    unsigned i;
    struct hblk *h;
    ptr_t start;

    GC_check_blocks();

    GC_n_dirty_errors = 0;
    GC_n_faulted_dirty_errors = 0;
    GC_n_changed_errors = 0;
    GC_n_clean = 0;
    GC_n_dirty = 0;

    index = 0;
    for (i = 0; i < GC_n_heap_sects; i++) {
        start = GC_heap_sects[i].hs_start;
        for (h = (struct hblk *)start;
             h < (struct hblk *)(start + GC_heap_sects[i].hs_bytes);
             h++) {
             GC_update_check_page(h, index);
             index++;
             if (index >= NSUMS) goto out;
        }
    }
out:
    if (GC_print_stats)
      GC_log_printf("Checked %lu clean and %lu dirty pages\n",
                    (unsigned long)GC_n_clean, (unsigned long)GC_n_dirty);
    if (GC_n_dirty_errors > 0) {
        GC_err_printf("Found %d dirty bit errors (%d were faulted)\n",
                      GC_n_dirty_errors, GC_n_faulted_dirty_errors);
    }
    if (GC_n_changed_errors > 0) {
        GC_err_printf("Found %lu changed bit errors\n",
                      (unsigned long)GC_n_changed_errors);
        GC_err_printf(
                "These may be benign (provoked by nonpointer changes)\n");
#       ifdef THREADS
          GC_err_printf(
            "Also expect 1 per thread currently allocating a stubborn obj\n");
#       endif
    }
    for (i = 0; i < GC_n_faulted; ++i) {
        GC_faulted[i] = 0; /* Don't expose block pointers to GC */
    }
    GC_n_faulted = 0;
}
Example No. 10
/* Caller does not hold allocation lock. */
void GC_init_gcj_malloc(int mp_index, void * /* really GC_mark_proc */mp)
{
    GC_bool ignore_gcj_info;
    DCL_LOCK_STATE;

    GC_init();    /* In case it's not already done. */
    LOCK();
    if (GC_gcj_malloc_initialized) {
      UNLOCK();
      return;
    }
    GC_gcj_malloc_initialized = TRUE;
    ignore_gcj_info = (0 != GETENV("GC_IGNORE_GCJ_INFO"));
    if (GC_print_stats && ignore_gcj_info) {
        GC_log_printf("Gcj-style type information is disabled!\n");
    }
    if (mp_index >= GC_n_mark_procs) ABORT("GC_init_gcj_malloc: bad index");
    GC_ASSERT(GC_mark_procs[mp_index] == (GC_mark_proc)0); /* unused */
    GC_mark_procs[mp_index] = (GC_mark_proc)mp;
    /* Set up object kind gcj-style indirect descriptor. */
      GC_gcjobjfreelist = (ptr_t *)GC_new_free_list_inner();
      if (ignore_gcj_info) {
        /* Use a simple length-based descriptor, thus forcing a fully  */
        /* conservative scan.                                          */
        GC_gcj_kind = GC_new_kind_inner((void **)GC_gcjobjfreelist,
                                        (0 | GC_DS_LENGTH),
                                        TRUE, TRUE);
      } else {
        GC_gcj_kind = GC_new_kind_inner(
                        (void **)GC_gcjobjfreelist,
                        (((word)(-MARK_DESCR_OFFSET - GC_INDIR_PER_OBJ_BIAS))
                         | GC_DS_PER_OBJECT),
                        FALSE, TRUE);
      }
    /* Set up object kind for objects that require mark proc call.     */
      if (ignore_gcj_info) {
        GC_gcj_debug_kind = GC_gcj_kind;
        GC_gcjdebugobjfreelist = GC_gcjobjfreelist;
      } else {
        GC_gcjdebugobjfreelist = (ptr_t *)GC_new_free_list_inner();
        GC_gcj_debug_kind = GC_new_kind_inner(
                                (void **)GC_gcjdebugobjfreelist,
                                GC_MAKE_PROC(mp_index,
                                             1 /* allocated with debug info */),
                                FALSE, TRUE);
      }
    UNLOCK();
}
Example No. 11
STATIC void GC_check_blocks(void)
{
    word bytes_in_free_blocks = GC_large_free_bytes;

    GC_bytes_in_used_blocks = 0;
    GC_apply_to_all_blocks(GC_add_block, (word)0);
    if (GC_print_stats)
      GC_log_printf("GC_bytes_in_used_blocks = %lu,"
                    " bytes_in_free_blocks = %lu, heapsize = %lu\n",
                    (unsigned long)GC_bytes_in_used_blocks,
                    (unsigned long)bytes_in_free_blocks,
                    (unsigned long)GC_heapsize);
    if (GC_bytes_in_used_blocks + bytes_in_free_blocks != GC_heapsize) {
        GC_err_printf("LOST SOME BLOCKS!!\n");
    }
}
Example No. 12
/*
 * Reclaim all small blocks waiting to be reclaimed.
 * Abort and return FALSE when/if (*stop_func)() returns TRUE.
 * If this returns TRUE, then it's safe to restart the world
 * with incorrectly cleared mark bits.
 * If ignore_old is TRUE, then reclaim only blocks that have been
 * recently reclaimed, and discard the rest.
 * Stop_func may be 0.
 */
GC_INNER GC_bool GC_reclaim_all(GC_stop_func stop_func, GC_bool ignore_old)
{
    word sz;
    unsigned kind;
    hdr * hhdr;
    struct hblk * hbp;
    struct obj_kind * ok;
    struct hblk ** rlp;
    struct hblk ** rlh;
#   ifndef SMALL_CONFIG
    CLOCK_TYPE start_time = 0; /* initialized to prevent warning. */
    CLOCK_TYPE done_time;

    if (GC_print_stats == VERBOSE)
        GET_TIME(start_time);
#   endif

    for (kind = 0; kind < GC_n_kinds; kind++) {
        ok = &(GC_obj_kinds[kind]);
        rlp = ok -> ok_reclaim_list;
        if (rlp == 0) continue;
        for (sz = 1; sz <= MAXOBJGRANULES; sz++) {
            rlh = rlp + sz;
            while ((hbp = *rlh) != 0) {
                if (stop_func != (GC_stop_func)0 && (*stop_func)()) {
                    return(FALSE);
                }
                hhdr = HDR(hbp);
                *rlh = hhdr -> hb_next;
                if (!ignore_old || hhdr -> hb_last_reclaimed == GC_gc_no - 1) {
                    /* It's likely we'll need it this time, too */
                    /* It's been touched recently, so this      */
                    /* shouldn't trigger paging.                */
                    GC_reclaim_small_nonempty_block(hbp, FALSE);
                }
            }
        }
    }
#   ifndef SMALL_CONFIG
    if (GC_print_stats == VERBOSE) {
        GET_TIME(done_time);
        GC_log_printf("Disposing of reclaim lists took %lu msecs\n",
                      MS_TIME_DIFF(done_time,start_time));
    }
#   endif
    return(TRUE);
}
Example No. 13
GC_INNER ptr_t GC_scratch_alloc(size_t bytes)
{
    register ptr_t result = scratch_free_ptr;

    bytes += GRANULE_BYTES-1;
    bytes &= ~(GRANULE_BYTES-1);
    scratch_free_ptr += bytes;
    if (scratch_free_ptr <= GC_scratch_end_ptr) {
        return(result);
    }
    {
        word bytes_to_get = MINHINCR * HBLKSIZE;

        if (bytes_to_get <= bytes) {
          /* Undo the damage, and get memory directly */
            bytes_to_get = bytes;
#           ifdef USE_MMAP
                bytes_to_get += GC_page_size - 1;
                bytes_to_get &= ~(GC_page_size - 1);
#           endif
            scratch_free_ptr -= bytes;
            result = (ptr_t)GET_MEM(bytes_to_get);
            if (result != 0) {
                GC_add_to_our_memory(result, bytes_to_get);
                GC_scratch_last_end_ptr = result + bytes;
            }
            return(result);
        }
        result = (ptr_t)GET_MEM(bytes_to_get);
        GC_add_to_our_memory(result, bytes_to_get);
        if (result == 0) {
            if (GC_print_stats)
                GC_log_printf("Out of memory - trying to allocate less\n");
            scratch_free_ptr -= bytes;
            bytes_to_get = bytes;
#           ifdef USE_MMAP
                bytes_to_get += GC_page_size - 1;
                bytes_to_get &= ~(GC_page_size - 1);
#           endif
            result = (ptr_t)GET_MEM(bytes_to_get);
            GC_add_to_our_memory(result, bytes_to_get);
            return result;
        }
        scratch_free_ptr = result;
        GC_scratch_end_ptr = scratch_free_ptr + bytes_to_get;
        GC_scratch_last_end_ptr = GC_scratch_end_ptr;
        return(GC_scratch_alloc(bytes));
    }
}
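The two opening statements are the standard power-of-two round-up: adding GRANULE_BYTES - 1 and masking with ~(GRANULE_BYTES - 1) pads the request to the next granule multiple, which works only because GRANULE_BYTES is a power of two. A standalone illustration with an assumed granule size:

  #include <stdio.h>

  #define GRANULE_BYTES 16    /* assumed value; must be a power of two */

  static size_t round_up_granules(size_t bytes)
  {
      bytes += GRANULE_BYTES - 1;
      bytes &= ~(size_t)(GRANULE_BYTES - 1);
      return bytes;
  }

  int main(void)
  {
      /* Prints 16 16 32: requests are padded to whole granules. */
      printf("%lu %lu %lu\n",
             (unsigned long)round_up_granules(1),
             (unsigned long)round_up_granules(16),
             (unsigned long)round_up_granules(17));
      return 0;
  }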
Example No. 14
GC_API void GC_CALL GC_clear_roots(void)
{
    DCL_LOCK_STATE;

    if (!EXPECT(GC_is_initialized, TRUE)) GC_init();
    LOCK();
    roots_were_cleared = TRUE;
    n_root_sets = 0;
    GC_root_size = 0;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      BZERO(GC_root_index, RT_SIZE * sizeof(void *));
#   endif
#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Clear all data root sections\n");
#   endif
    UNLOCK();
}
Example No. 15
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
{
    size_t granules;
    void *tsd;
    void *result;

#   if MAXOBJKINDS > THREAD_FREELISTS_KINDS
      if (EXPECT(knd >= THREAD_FREELISTS_KINDS, FALSE)) {
        return GC_malloc_kind_global(bytes, knd);
      }
#   endif
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
    {
      GC_key_t k = GC_thread_key;

      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_malloc_kind_global(bytes, knd);
      }
      tsd = GC_getspecific(k);
    }
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_malloc_kind_global(bytes, knd);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    granules = ROUNDED_UP_GRANULES(bytes);
    GC_FAST_MALLOC_GRANS(result, granules,
                         ((GC_tlfs)tsd) -> _freelists[knd], DIRECT_GRANULES,
                         knd, GC_malloc_kind_global(bytes, knd),
                         (void)(knd == PTRFREE ? NULL
                                               : (obj_link(result) = 0)));
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, knd, result,
                    (unsigned long)GC_gc_no);
#   endif
    return result;
}
Example No. 16
static void start_mark_threads(void)
{
    unsigned i;
    pthread_attr_t attr;

    if (GC_markers > MAX_MARKERS) {
        WARN("Limiting number of mark threads\n", 0);
        GC_markers = MAX_MARKERS;
    }
    if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");

    if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
        ABORT("pthread_attr_setdetachstate failed");

#   if defined(HPUX) || defined(GC_DGUX386_THREADS)
      /* Default stack size is usually too small: fix it.  */
      /* Otherwise marker threads or GC may run out of     */
      /* space.                                            */
#     define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
      {
        size_t old_size;

        if (pthread_attr_getstacksize(&attr, &old_size) != 0)
          ABORT("pthread_attr_getstacksize failed\n");
        if (old_size < MIN_STACK_SIZE) {
          if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
            ABORT("pthread_attr_setstacksize failed\n");
        }
      }
#   endif /* HPUX || GC_DGUX386_THREADS */
    if (GC_print_stats) {
        GC_log_printf("Starting %ld marker threads\n", GC_markers - 1);
    }
    for (i = 0; i < GC_markers - 1; ++i) {
      if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
                              GC_mark_thread, (void *)(word)i)) {
        WARN("Marker thread creation failed, errno = %ld.\n", errno);
      }
    }
}
Example No. 17
/* Return the minimum number of bytes that must be allocated between    */
/* collections to amortize the collection cost.  Should be non-zero.    */
static word min_bytes_allocd(void)
{
    word result;
    word stack_size;
    word total_root_size;       /* includes double stack size,  */
                                /* since the stack is expensive */
                                /* to scan.                     */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

#   ifdef THREADS
      if (GC_need_to_lock) {
        /* We are multi-threaded... */
        stack_size = GC_total_stacksize;
        /* For now, we just use the value computed during the latest GC. */
#       ifdef DEBUG_THREADS
          GC_log_printf("Total stacks size: %lu\n",
                        (unsigned long)stack_size);
#       endif
      } else
#   endif
    /* else*/ {
#     ifdef STACK_NOT_SCANNED
        stack_size = 0;
#     elif defined(STACK_GROWS_UP)
        stack_size = GC_approx_sp() - GC_stackbottom;
#     else
        stack_size = GC_stackbottom - GC_approx_sp();
#     endif
    }

    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
                + total_root_size;
    result = scan_size / GC_free_space_divisor;
    if (GC_incremental) {
      result /= 2;
    }
    return result > 0 ? result : 1;
}
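Plugging representative numbers into the heuristic makes its shape visible: twice the pointer-containing heap, a quarter of the pointer-free heap, and double-weighted stacks are summed, divided by GC_free_space_divisor (3 by default), and halved again in incremental mode. A worked example with hypothetical figures:

  #include <stdio.h>

  int main(void)
  {
      /* All figures are hypothetical, just to exercise the formula. */
      unsigned long stack_size = 1UL << 20;        /* 1 MiB of stacks */
      unsigned long root_size = 512UL << 10;       /* 512 KiB static roots */
      unsigned long composite_in_use = 8UL << 20;  /* pointer-containing data */
      unsigned long atomic_in_use = 4UL << 20;     /* pointer-free data */
      unsigned long divisor = 3;                   /* GC_free_space_divisor */

      unsigned long total_root_size = 2 * stack_size + root_size;
      unsigned long scan_size = 2 * composite_in_use + atomic_in_use / 4
                                + total_root_size;
      unsigned long result = scan_size / divisor;

      printf("stop-world: %lu bytes between GCs\n", result);
      printf("incremental: %lu bytes between GCs\n", result / 2);
      return 0;
  }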
Example No. 18
File: alloc.c (project: yroux/bdwgc)
  STATIC int GC_CALLBACK GC_timeout_stop_func (void)
  {
    CLOCK_TYPE current_time;
    static unsigned count = 0;
    unsigned long time_diff;

    if ((*GC_default_stop_func)())
      return(1);

    if ((count++ & 3) != 0) return(0);
    GET_TIME(current_time);
    time_diff = MS_TIME_DIFF(current_time,GC_start_time);
    if (time_diff >= GC_time_limit) {
        if (GC_print_stats) {
          GC_log_printf(
                "Abandoning stopped marking after %lu msecs (attempt %d)\n",
                time_diff, GC_n_attempts);
        }
        return(1);
    }
    return(0);
  }
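GC_timeout_stop_func is what enforces the collector's pause budget: every fourth call it compares the elapsed marking time against GC_time_limit and abandons the attempt once the limit is exceeded. A sketch of how a client would set that budget (GC_set_time_limit is the public setter in recent bdwgc; older releases assign the GC_time_limit variable directly):

  #include <gc.h>

  int main(void)
  {
      GC_INIT();
      GC_set_time_limit(15);      /* aim for marking pauses of <= 15 ms */
      GC_enable_incremental();    /* pause limiting applies to incremental mode */
      /* ... allocate as usual; marking attempts that overrun the budget */
      /* are abandoned and retried, as in GC_timeout_stop_func above.    */
      return 0;
  }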
Example No. 19
/* Invoked from GC_start_routine(). */
void * GC_CALLBACK GC_inner_start_routine(struct GC_stack_base *sb, void *arg)
{
  void * (*start)(void *);
  void * start_arg;
  void * result;
  volatile GC_thread me =
                GC_start_rtn_prepare_thread(&start, &start_arg, sb, arg);

# ifndef NACL
    pthread_cleanup_push(GC_thread_exit_proc, me);
# endif
  result = (*start)(start_arg);
# ifdef DEBUG_THREADS
    GC_log_printf("Finishing thread %p\n", (void *)pthread_self());
# endif
  me -> status = result;
# ifndef NACL
    pthread_cleanup_pop(1);
    /* Cleanup acquires lock, ensuring that we can't exit while         */
    /* a collection that thinks we're alive is trying to stop us.       */
# endif
  return result;
}
Example No. 20
/* Return the minimum number of bytes that must be allocated between    */
/* collections to amortize the collection cost.                         */
static word min_bytes_allocd(void)
{
    int dummy; /* GC_stackbottom is used only for a single-threaded case. */
#   ifdef STACK_GROWS_UP
      word stack_size = (ptr_t)(&dummy) - GC_stackbottom;
#   else
      word stack_size = GC_stackbottom - (ptr_t)(&dummy);
#   endif

    word total_root_size;       /* includes double stack size,  */
                                /* since the stack is expensive */
                                /* to scan.                     */
    word scan_size;             /* Estimate of memory to be scanned     */
                                /* during normal GC.                    */

#   ifdef THREADS
      if (GC_need_to_lock) {
        /* We are multi-threaded... */
        stack_size = GC_total_stacksize;
        /* For now, we just use the value computed during the latest GC. */
#       ifdef DEBUG_THREADS
          GC_log_printf("Total stacks size: %lu\n",
                        (unsigned long)stack_size);
#       endif
      }
#   endif

    total_root_size = 2 * stack_size + GC_root_size;
    scan_size = 2 * GC_composite_in_use + GC_atomic_in_use / 4
                + total_root_size;
    if (GC_incremental) {
        return scan_size / (2 * GC_free_space_divisor);
    } else {
        return scan_size / GC_free_space_divisor;
    }
}
Example No. 21
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_core_malloc(bytes);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_core_malloc(bytes);
      }
#   endif
    GC_ASSERT(GC_is_initialized);

    GC_ASSERT(GC_is_thread_tsd_valid(tsd));

    tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc(%lu) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, result, (unsigned long)GC_gc_no);
#   endif
    return result;
}
Example No. 22
/* Add [b,e) to the root set.  Adding the same interval a second time   */
/* is a moderately fast no-op, and hence benign.  We do not handle      */
/* different but overlapping intervals efficiently.  (We do handle      */
/* them correctly.)  Tmp specifies that the interval may be deleted     */
/* before re-registering dynamic libraries.                             */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    GC_ASSERT((word)b <= (word)e);
    b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1));
                                        /* round b up to word boundary */
    e = (ptr_t)((word)e & ~(word)(sizeof(word) - 1));
                                        /* round e down to word boundary */
    if ((word)b >= (word)e) return; /* nothing to do */

#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        int i;
        struct roots * old = NULL; /* initialized to prevent warning. */

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((word)b <= (word)old->r_end
                 && (word)e >= (word)old->r_start) {
                if ((word)b < (word)old->r_start) {
                    GC_root_size += old->r_start - b;
                    old -> r_start = b;
                }
                if ((word)e > (word)old->r_end) {
                    GC_root_size += e - old->r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
              other = GC_static_roots + i;
              b = other -> r_start;
              e = other -> r_end;
              if ((word)b <= (word)old->r_end
                  && (word)e >= (word)old->r_start) {
                if ((word)b < (word)old->r_start) {
                    GC_root_size += old->r_start - b;
                    old -> r_start = b;
                }
                if ((word)e > (word)old->r_end) {
                    GC_root_size += e - old->r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= other -> r_tmp;
                /* Delete this entry. */
                  GC_root_size -= (other -> r_end - other -> r_start);
                  other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                  other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                  other -> r_tmp = GC_static_roots[n_root_sets-1].r_tmp;
                  n_root_sets--;
              }
            }
          return;
        }
      }
#   else
      {
        struct roots * old = (struct roots *)GC_roots_present(b);

        if (old != 0) {
          if ((word)e <= (word)old->r_end) {
            old -> r_tmp &= tmp;
            return; /* already there */
          }
          if (old -> r_tmp == tmp || !tmp) {
            /* Extend the existing root. */
            GC_root_size += e - old -> r_end;
            old -> r_end = e;
            old -> r_tmp = tmp;
            return;
          }
          b = old -> r_end;
        }
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets");
    }

#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Adding data root section %d: %p .. %p%s\n",
                    n_root_sets, (void *)b, (void *)e,
                    tmp ? " (temporary)" : "");
#   endif
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
      add_roots_to_index(GC_static_roots + n_root_sets);
#   endif
    GC_root_size += e - b;
    n_root_sets++;
}
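Client code reaches this routine through the public GC_add_roots(), which takes the lock and calls GC_add_roots_inner(); as the code above shows, re-adding an overlapping interval merely extends or merges existing entries. A minimal sketch that registers memory the collector would not otherwise scan:

  #include <gc.h>
  #include <stdlib.h>

  int main(void)
  {
      void **cell;

      GC_INIT();
      /* Memory from the system malloc is not scanned by default;   */
      /* registering it as a root lets it hold GC pointers safely.  */
      cell = (void **)malloc(sizeof(void *));
      GC_add_roots(cell, cell + 1);       /* [low, high_plus_1) */
      *cell = GC_MALLOC(64);              /* now kept alive via the root */
      /* ... */
      GC_remove_roots(cell, cell + 1);
      free(cell);
      return 0;
  }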
Example No. 23
/* finalized when this finalizer is invoked.			*/
GC_API void GC_register_finalizer_inner(void * obj,
					GC_finalization_proc fn, void *cd,
					GC_finalization_proc *ofn, void **ocd,
					finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
        LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)(&fo_head),
                      &log_fo_table_size);
        if (GC_print_stats) {
            GC_log_printf("Grew fo table to %u entries\n",
                          (1 << log_fo_table_size));
        }
    }
    /* In the THREADS case signals are disabled and we hold allocation  */
    /* lock; otherwise neither is true.  Proceed carefully.             */
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this   */
            /* should be safe.  The client may see only *ocd    */
            /* updated, but we'll declare that to be his        */
            /* problem.                                         */
            if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
                if (prev_fo == 0) {
                  fo_head[index] = fo_next(curr_fo);
                } else {
                  fo_set_next(prev_fo, fo_next(curr_fo));
                }
            if (fn == 0) {
                GC_fo_entries--;
                  /* May not happen if we get a signal.  But a high    */
                  /* estimate will only make the table larger than     */
                  /* necessary.                                        */
#               if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                  GC_free((void *)curr_fo);
#               endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
                /* Reinsert it.  We deleted it first to maintain        */
                /* consistency in the event of a signal.                */
                if (prev_fo == 0) {
                  fo_head[index] = curr_fo;
                } else {
                  fo_set_next(prev_fo, curr_fo);
                }
            }
#           ifdef THREADS
                UNLOCK();
#           endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#       ifdef THREADS
            UNLOCK();
#       endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
      /* We won't collect it, hence finalizer wouldn't be run. */
#     ifdef THREADS
          UNLOCK();
#     endif
      return;
    }
    new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object), NORMAL);
    if (EXPECT(0 == new_fo, FALSE)) {
#     ifdef THREADS
        UNLOCK();
#     endif
      new_fo = (struct finalizable_object *)
          GC_oom_fn(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        GC_finalization_failures++;
        return;
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
        LOCK();
#     endif
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
#   endif
}
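The public GC_REGISTER_FINALIZER macro (and GC_register_finalizer) funnels into this routine with the default mark procedure. A minimal sketch; as ever with a conservative collector, a demo cannot guarantee the object is found unreachable on the first collection:

  #include <gc.h>
  #include <stdio.h>

  static void on_reclaim(void *obj, void *client_data)
  {
      printf("finalizing %p (%s)\n", obj, (char *)client_data);
  }

  int main(void)
  {
      static char tag[] = "demo object";
      void *obj;

      GC_INIT();
      obj = GC_MALLOC(64);
      GC_REGISTER_FINALIZER(obj, on_reclaim, tag, NULL, NULL);
      obj = NULL;                  /* drop the reference */
      GC_gcollect();               /* queues the finalizer if unreachable */
      GC_invoke_finalizers();      /* runs any queued finalizers now */
      return 0;
  }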
Example No. 24
  /* This function is called repeatedly by GC_register_map_entries.     */
  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)b % sizeof(word) == 0 && (word)e % sizeof(word) == 0);
    for (i = 0; i < n_root_sets; i++) {
      ptr_t r_start, r_end;

      if (GC_static_roots[i].r_tmp) {
        /* The remaining roots are skipped as they are all temporary. */
#       ifdef GC_ASSERTIONS
          int j;
          for (j = i + 1; j < n_root_sets; j++) {
            GC_ASSERT(GC_static_roots[j].r_tmp);
          }
#       endif
        break;
      }
      r_start = GC_static_roots[i].r_start;
      r_end = GC_static_roots[i].r_end;
      if (!EXPECT((word)e <= (word)r_start || (word)r_end <= (word)b, TRUE)) {
#       ifdef DEBUG_ADD_DEL_ROOTS
          GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n",
                        (void *)b, (void *)e,
                        i, (void *)r_start, (void *)r_end);
#       endif
        if ((word)r_start < (word)b) {
          GC_root_size -= r_end - b;
          GC_static_roots[i].r_end = b;
          /* No need to rebuild as hash does not use r_end value. */
          if ((word)e < (word)r_end) {
            int j;

            if (rebuild) {
              GC_rebuild_root_index();
              rebuild = FALSE;
            }
            GC_add_roots_inner(e, r_end, FALSE); /* updates n_root_sets */
            for (j = i + 1; j < n_root_sets; j++)
              if (GC_static_roots[j].r_tmp)
                break;
            if (j < n_root_sets-1 && !GC_static_roots[n_root_sets-1].r_tmp) {
              /* Exchange the roots to have all temporary ones at the end. */
              ptr_t tmp_r_start = GC_static_roots[j].r_start;
              ptr_t tmp_r_end = GC_static_roots[j].r_end;

              GC_static_roots[j].r_start =
                                GC_static_roots[n_root_sets-1].r_start;
              GC_static_roots[j].r_end = GC_static_roots[n_root_sets-1].r_end;
              GC_static_roots[j].r_tmp = FALSE;
              GC_static_roots[n_root_sets-1].r_start = tmp_r_start;
              GC_static_roots[n_root_sets-1].r_end = tmp_r_end;
              GC_static_roots[n_root_sets-1].r_tmp = TRUE;
              rebuild = TRUE;
            }
          }
        } else {
          if ((word)e < (word)r_end) {
            GC_root_size -= e - r_start;
            GC_static_roots[i].r_start = e;
          } else {
            GC_remove_root_at_pos(i);
            i--;
          }
          rebuild = TRUE;
        }
      }
    }
    if (rebuild)
      GC_rebuild_root_index();
  }