Example #1
/* Like GC_free, but assumes the allocator lock is already held.       */
GC_INNER void GC_free_inner(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* bytes */
    size_t ngranules;  /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
}
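The small-object branch above threads p onto a per-kind, per-size free list by reusing the object's first word as the link field (which is also why BZERO starts clearing at (word *)p + 1). Below is a minimal, self-contained sketch of that linking pattern; obj_link is modeled here as a plain pointer store, and the other names (push_free, free_list_head) are illustrative stand-ins, not part of the library.

/* Sketch of the free-list push done by GC_free_inner: the first word
 * of each freed object is reused as the link to the next free object. */
#include <stdio.h>

#define obj_link(p) (*(void **)(p))     /* modeled as a plain pointer store */

static void *free_list_head;            /* plays the role of *flh           */

static void push_free(void *p)
{
    obj_link(p) = free_list_head;       /* first word of p -> old head      */
    free_list_head = p;                 /* p becomes the new head           */
}

int main(void)
{
    static void *obj_a[4], *obj_b[4];   /* stand-ins for two freed objects  */

    push_free(obj_a);
    push_free(obj_b);
    /* Walk the list: obj_b, then obj_a, then stop at NULL. */
    for (void *p = free_list_head; p != NULL; p = obj_link(p))
        printf("free object at %p\n", p);
    return 0;
}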
Example #2
/* Explicitly deallocate an object p.                           */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* In bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
    /* Required by ANSI.  It's not my fault ...     */
#   ifdef LOG_ALLOCS
    GC_log_printf("GC_free(%p) after GC #%lu\n",
                  p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
    /* For Solaris, we have to redirect malloc calls during         */
    /* initialization.  For the others, this seems to happen        */
    /* implicitly.                                                  */
    /* Don't try to deallocate that memory.                         */
    if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the      */
        /* object is reallocated, it doesn't matter; otherwise  */
        /* the collector will do it, since it's on a free list. */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
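GC_free is the public entry point, normally reached through the GC_FREE macro in gc.h. A minimal caller-side sketch follows; explicit freeing is optional with this collector, it simply returns the object to its free list immediately instead of waiting for the next collection.

/* Minimal usage sketch for the public API shown above. */
#include <gc.h>
#include <string.h>

int main(void)
{
    GC_INIT();                          /* portable code should call this    */
                                        /* before the first allocation       */
    char *buf = GC_MALLOC(64);          /* collectable, pointer-containing   */
    if (buf != NULL) {
        strcpy(buf, "short-lived scratch buffer");
        GC_FREE(buf);                   /* expands to GC_free(buf) in        */
                                        /* non-debug builds                  */
        buf = NULL;                     /* don't keep a dangling pointer     */
    }
    return 0;
}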
Example #3
/// The one difference between this one and the one above
/// is the addition of the flag clear.  This shouldn't really
/// matter, but I was getting a bit of weird behavior in the baseline
/// results (milc ran 100 ms longer than previously measured) and
/// wanted to eliminate it.
STATIC void HINTGC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;  /* size of objects in current block     */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    struct hblk ** rlh;

    // This line was added for hinted collection: since we have to
    // visit every block header anyway, clear out the
    // pending-free flag here.
    hhdr->hb_flags &= ~HAS_PENDING_FREE;

    if( sz > MAXOBJBYTES ) {  /* 1 big object */
        if( !mark_bit_from_hdr(hhdr, 0) ) {
            if (report_if_found) {
                GC_add_leaked((ptr_t)hbp);
            } else {
                size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
                if (blocks > 1) {
                    GC_large_allocd_bytes -= blocks * HBLKSIZE;
                }
                GC_bytes_found += sz;
                GC_freehblk(hbp);
            }
        } else {
            if (hhdr -> hb_descr != 0) {
                GC_composite_in_use += sz;
            } else {
                GC_atomic_in_use += sz;
            }
        }
    } else {
        GC_bool empty = GC_block_empty(hhdr);
#       ifdef PARALLEL_MARK
        /* Count can be low or one too high because we sometimes      */
        /* have to ignore decrements.  Objects can also potentially   */
        /* be repeatedly marked by each marker.                       */
        /* Here we assume two markers, but this is extremely          */
        /* unlikely to fail spuriously with more.  And if it does, it */
        /* should be looked at.                                       */
        GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
        GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        if (hhdr -> hb_descr != 0) {
            GC_composite_in_use += sz * hhdr -> hb_n_marks;
        } else {
            GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        }
        if (report_if_found) {
            GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
        } else if (empty) {
            GC_bytes_found += HBLKSIZE;
            GC_freehblk(hbp);
        } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
            /* group of smaller objects, enqueue the real work */
            rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
            hhdr -> hb_next = *rlh;
            *rlh = hbp;
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
    }
}
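The final else-if defers the real sweep work: a partly full small-object block is pushed onto the per-size reclaim list through the header's hb_next field, to be swept lazily by GC_reclaim_small_nonempty_block later. The sketch below shows just that intrusive, size-indexed enqueue in isolation; the struct and field names (block_header, obj_granules, reclaim_list) are illustrative stand-ins, not the library's own.

/* Stand-alone sketch of the "enqueue the real work" step: blocks that
 * still hold some live small objects are chained onto a per-size
 * reclaim list through a link field in the block header. */
#include <stddef.h>
#include <stdio.h>

#define MAX_GRANULES 128                    /* stand-in for MAXOBJGRANULES   */

struct block_header {
    size_t obj_granules;                    /* object size in this block     */
    struct block_header *next;              /* plays the role of hb_next     */
};

/* One list head per object size, like ok_reclaim_list[]. */
static struct block_header *reclaim_list[MAX_GRANULES + 1];

static void enqueue_for_reclaim(struct block_header *hdr)
{
    struct block_header **rlh = &reclaim_list[hdr->obj_granules];
    hdr->next = *rlh;                       /* push onto size-indexed list   */
    *rlh = hdr;
}

int main(void)
{
    static struct block_header b1 = { 2, NULL }, b2 = { 2, NULL };

    enqueue_for_reclaim(&b1);
    enqueue_for_reclaim(&b2);
    for (struct block_header *h = reclaim_list[2]; h != NULL; h = h->next)
        printf("block %p queued for lazy sweep\n", (void *)h);
    return 0;
}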