Example #1
  /* A size of 0 granules is used for large objects.                    */
  GC_INNER GC_bool GC_add_map_entry(size_t granules)
  {
    unsigned displ;
    unsigned short * new_map;

    if (granules > BYTES_TO_GRANULES(MAXOBJBYTES)) granules = 0;
    if (GC_obj_map[granules] != 0) {
        return(TRUE);
    }
    new_map = (unsigned short *)GC_scratch_alloc(MAP_LEN * sizeof(short));
    if (new_map == 0) return(FALSE);
    GC_COND_LOG_PRINTF(
                "Adding block map for size of %u granules (%u bytes)\n",
                (unsigned)granules, (unsigned)GRANULES_TO_BYTES(granules));
    if (granules == 0) {
      for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
        new_map[displ] = 1;  /* Nonzero to get us out of marker fast path. */
      }
    } else {
      for (displ = 0; displ < BYTES_TO_GRANULES(HBLKSIZE); displ++) {
        new_map[displ] = (unsigned short)(displ % granules);
      }
    }
    GC_obj_map[granules] = new_map;
    return(TRUE);
  }
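To make the map's contents concrete: for a block holding objects of `granules` granules each, entry `displ` stores `displ % granules`, i.e. how many granules a displacement lies past the start of its object, so an object's base can be recovered with one table lookup and one subtraction. A minimal standalone sketch of that arithmetic (the sizes and names below are illustrative, not bdwgc's real constants):

```c
#include <stdio.h>

#define BLOCK_GRANULES 256   /* granules per block; illustrative value */

int main(void)
{
    unsigned short map[BLOCK_GRANULES];
    unsigned granules = 3;   /* block holds objects of 3 granules each */
    unsigned displ;

    /* Build the per-size map exactly as GC_add_map_entry does for  */
    /* a small-object size.                                         */
    for (displ = 0; displ < BLOCK_GRANULES; displ++)
        map[displ] = (unsigned short)(displ % granules);

    /* A pointer landing at granule 7 of the block belongs to the   */
    /* object starting at granule 7 - map[7] = 7 - 1 = 6.           */
    displ = 7;
    printf("object base granule: %u\n", displ - map[displ]);
    return 0;
}
```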
Example #2
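/* Explicitly deallocate an object p when the allocator lock is     */
/* already held; unlike GC_free below, no locking is performed.     */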
GC_INNER void GC_free_inner(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* bytes */
    size_t ngranules;  /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
}
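The small-object path above pushes the freed object onto the head of its size-class free list, reusing the object's own first word as the link field (bdwgc's `obj_link()` macro). A self-contained sketch of that intrusive free-list idiom, with `obj_link` modeled directly as the first word (names here are illustrative):

```c
#include <stdio.h>
#include <stdlib.h>

/* Treat the first word of a free object as its "next" pointer,     */
/* mirroring what bdwgc's obj_link() macro does.                    */
#define obj_link(p) (*(void **)(p))

int main(void)
{
    void *freelist = NULL;                 /* one list per object size */
    void *a = malloc(32), *b = malloc(32);
    if (a == NULL || b == NULL) return 1;

    /* Free a, then b: each becomes the new list head.              */
    obj_link(a) = freelist; freelist = a;
    obj_link(b) = freelist; freelist = b;

    /* Allocation pops from the head in O(1).                       */
    void *p = freelist;
    freelist = obj_link(p);
    printf("%s\n", p == b ? "LIFO reuse: got b back first" : "?");
    free(a); free(b);
    return 0;
}
```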
Example #3
/* Explicitly deallocate an object p.                           */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* In bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
    /* Required by ANSI.  It's not my fault ...     */
#   ifdef LOG_ALLOCS
    GC_log_printf("GC_free(%p) after GC #%lu\n",
                  p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
    /* For Solaris, we have to redirect malloc calls during         */
    /* initialization.  For the others, this seems to happen        */
    /* implicitly.                                                  */
    /* Don't try to deallocate that memory.                         */
    if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the      */
        /* object is reallocated, it doesn't matter.  Otherwise */
        /* the collector will do it, since it's on a free list. */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
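Since GC_free is the public entry point, it can be exercised directly from client code. A minimal usage sketch against the collector's public header (assumes bdwgc is installed and the program is linked with -lgc):

```c
#include <gc.h>     /* bdwgc public API */
#include <stdio.h>

int main(void)
{
    GC_INIT();                      /* portable collector startup   */
    int *p = (int *)GC_MALLOC(64 * sizeof(int));
    if (p == NULL) return 1;
    p[0] = 42;
    printf("%d\n", p[0]);
    /* Explicit deallocation is optional under the collector, but   */
    /* GC_free immediately returns the object to its free list.     */
    GC_free(p);
    return 0;
}
```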
Example #4
/*
 * Restore unmarked small objects in the block pointed to by hbp
 * to the appropriate object free list.
 * If entirely empty blocks are to be completely deallocated, then
 * caller should perform that check.
 */
STATIC void GC_reclaim_small_nonempty_block(struct hblk *hbp,
        GC_bool report_if_found)
{
    hdr *hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);

    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        GC_reclaim_check(hbp, hhdr, sz);
    } else {
        *flh = GC_reclaim_generic(hbp, hhdr, sz, ok -> ok_init,
                                  *flh, &GC_bytes_found);
    }
}
Example #5
/*
 * Restore unmarked small objects in the block pointed to by hbp
 * to the appropriate object free list.
 * If entirely empty blocks are to be completely deallocated, then
 * caller should perform that check.
 */
void GC_reclaim_small_nonempty_block(struct hblk *hbp,
                                     int report_if_found, signed_word *count)
{
    hdr *hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;
    int kind = hhdr -> hb_obj_kind;
    struct obj_kind * ok = &GC_obj_kinds[kind];
    void **flh = &(ok -> ok_freelist[BYTES_TO_GRANULES(sz)]);
    
    hhdr -> hb_last_reclaimed = (unsigned short) GC_gc_no;

    if (report_if_found) {
        GC_reclaim_check(hbp, hhdr, sz);
    } else {
        *flh = GC_reclaim_generic(hbp, hhdr, sz,
                                  (ok -> ok_init || GC_debugging_started),
                                  *flh, &GC_bytes_found);
    }
}
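This is an older variant of the function in Example #4. Beyond the formatting, it differs in two visible ways: object clearing is also forced while `GC_debugging_started` is set, and the signature carries a `signed_word *count` parameter that this body never reads (the byte count is accumulated through `&GC_bytes_found` instead).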
Example #6
/// The one difference between this function and the one above
/// is the addition of the flag clear.  This shouldn't really
/// matter, but I was seeing some odd behavior in the baseline
/// results (milc ran 100ms longer than previously measured) and
/// wanted to eliminate it.
STATIC void HINTGC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;  /* size of objects in current block     */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    struct hblk ** rlh;

    // This line was added for hinted collection: since we have to
    // visit every block header anyway, clear out the pending free
    // flags here.
    hhdr->hb_flags &= ~HAS_PENDING_FREE;

    if (sz > MAXOBJBYTES) {  /* 1 big object */
        if (!mark_bit_from_hdr(hhdr, 0)) {
            if (report_if_found) {
                GC_add_leaked((ptr_t)hbp);
            } else {
                size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
                if (blocks > 1) {
                    GC_large_allocd_bytes -= blocks * HBLKSIZE;
                }
                GC_bytes_found += sz;
                GC_freehblk(hbp);
            }
        } else {
            if (hhdr -> hb_descr != 0) {
                GC_composite_in_use += sz;
            } else {
                GC_atomic_in_use += sz;
            }
        }
    } else {
        GC_bool empty = GC_block_empty(hhdr);
#       ifdef PARALLEL_MARK
        /* Count can be low or one too high because we sometimes      */
        /* have to ignore decrements.  Objects can also potentially   */
        /* be repeatedly marked by each marker.                       */
        /* Here we assume two markers, but this is extremely          */
        /* unlikely to fail spuriously with more.  And if it does, it */
        /* should be looked at.                                       */
        GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
        GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        if (hhdr -> hb_descr != 0) {
            GC_composite_in_use += sz * hhdr -> hb_n_marks;
        } else {
            GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        }
        if (report_if_found) {
            GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
        } else if (empty) {
            GC_bytes_found += HBLKSIZE;
            GC_freehblk(hbp);
        } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
            /* group of smaller objects, enqueue the real work */
            rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
            hhdr -> hb_next = *rlh;
            *rlh = hbp;
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
    }
}
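The single HINTGC addition, `hhdr->hb_flags &= ~HAS_PENDING_FREE;`, is the standard clear-a-bit idiom, placed here because the sweep already visits every block header. The pattern in isolation (the flag value below is illustrative, not the one HINTGC actually uses):

```c
#include <stdio.h>

#define HAS_PENDING_FREE 0x4   /* illustrative bit; not HINTGC's value */

int main(void)
{
    unsigned char flags = 0;
    flags |= HAS_PENDING_FREE;                  /* set the flag   */
    printf("set:   %d\n", (flags & HAS_PENDING_FREE) != 0);
    flags &= (unsigned char)~HAS_PENDING_FREE;  /* clear the flag */
    printf("clear: %d\n", (flags & HAS_PENDING_FREE) != 0);
    return 0;
}
```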