Example #1
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
		        -> ptrfree_freelists + index;
	ptr_t my_entry = *my_fl;
    
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    GC_PTR result = (GC_PTR)my_entry;
	    *my_fl = obj_link(my_entry);
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    /* my_entry is a small allocation counter, not a pointer:	*/
	    /* allocate directly a few more times before taking the	*/
	    /* trouble to refill the thread-local free list.		*/
	    *my_fl = my_entry + index + 1;
	    return GC_malloc_atomic(bytes);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
	    /* *my_fl is updated while the collector is excluded;	*/
	    /* the free list is always visible to the collector as 	*/
	    /* such.							*/
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_malloc_atomic(bytes);
	}
    }
}
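
All five examples lean on the same encoding of the per-thread free-list slot: a value of HBLKSIZE or more is a genuine object pointer (object addresses are assumed never to fall below HBLKSIZE), a value in 1..DIRECT_GRANULES is a counter of direct allocations to perform before refilling, and anything else means the list is empty and must be refilled. The sketch below is not Boehm GC source; the two constants are assumed values chosen only to make it run.

#include <stdint.h>
#include <stdio.h>

#define MY_HBLKSIZE        4096  /* assumed heap-block size */
#define MY_DIRECT_GRANULES 32    /* assumed direct-allocation budget */

/* Classify a free-list slot value the way the branches above do. */
static const char *classify(uintptr_t slot)
{
    if (slot >= MY_HBLKSIZE)
        return "object pointer";             /* fast path: pop and return  */
    if (slot - 1 < MY_DIRECT_GRANULES)       /* unsigned trick: 1..N only  */
        return "counter: allocate directly";
    return "empty: refill the free list";    /* 0, or counter past budget  */
}

int main(void)
{
    printf("%s\n%s\n%s\n", classify(0x100000), classify(5), classify(0));
    return 0;
}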
Example #2
GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
        return(GC_malloc(bytes));
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl;
	ptr_t my_entry;
#	if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
	GC_key_t k = GC_thread_key;
#	endif
	void * tsd;

#	if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
	    if (EXPECT(0 == k, 0)) {
		/* This can happen if we get called when the world is	*/
		/* being initialized.  Whether we can actually complete	*/
		/* the initialization then is unclear.			*/
		GC_init_parallel();
		k = GC_thread_key;
	    }
#	endif
	tsd = GC_getspecific(GC_thread_key);
#	ifdef GC_ASSERTIONS
	  LOCK();
	  GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
	  UNLOCK();
#	endif
	my_fl = ((GC_thread)tsd) -> normal_freelists + index;
	my_entry = *my_fl;
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    ptr_t next = obj_link(my_entry);
	    GC_PTR result = (GC_PTR)my_entry;
	    *my_fl = next;
	    obj_link(my_entry) = 0;
	    PREFETCH_FOR_WRITE(next);
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    *my_fl = my_entry + index + 1;
	    return GC_malloc(bytes);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_malloc(bytes);
	}
    }
}
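
The refill branch calls GC_generic_malloc_many(), which fills *my_fl with a whole list of objects chained through their first word. The same batching is available to clients through the public GC_malloc_many()/GC_NEXT() pair declared in gc.h; the helper below is a usage sketch, not library code.

#include <stddef.h>
#include <gc/gc.h>  /* or <gc.h>, depending on the installation */

/* Pop one object from a locally held batch, refilling from the
 * collector when the batch runs dry.  Mirrors the slow path above. */
static void *pop_from_batch(void **batch, size_t lb)
{
    if (*batch == NULL) {
        *batch = GC_malloc_many(lb);  /* may return NULL on exhaustion */
        if (*batch == NULL)
            return NULL;
    }
    void *obj = *batch;
    *batch = GC_NEXT(obj);  /* unlink from the list                   */
    GC_NEXT(obj) = NULL;    /* clear the link word, like obj_link = 0 */
    return obj;
}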
Example #3
GC_PTR GC_local_gcj_malloc(size_t bytes,
			   void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
	                -> gcj_freelists + index;
	ptr_t my_entry = *my_fl;
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    GC_PTR result = (GC_PTR)my_entry;
	    GC_ASSERT(!GC_incremental);
	    /* We assert that any concurrent marker will stop us.	*/
	    /* Thus it is impossible for a mark procedure to see the 	*/
	    /* allocation of the next object, but to see this object 	*/
	    /* still containing a free list pointer.  Otherwise the 	*/
	    /* marker might find a random "mark descriptor".		*/
	    *(volatile ptr_t *)my_fl = obj_link(my_entry);
	    /* We must update the freelist before we store the pointer.	*/
	    /* Otherwise a GC at this point would see a corrupted	*/
	    /* free list.						*/
	    /* A memory barrier is probably never needed, since the 	*/
	    /* action of stopping this thread will cause prior writes	*/
	    /* to complete.						*/
	    GC_ASSERT(((void * volatile *)result)[1] == 0); 
	    *(void * volatile *)result = ptr_to_struct_containing_descr; 
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    if (!GC_incremental) *my_fl = my_entry + index + 1;
		/* In the incremental case, we always have to take this	*/
		/* path.  Thus we leave the counter alone.		*/
	    return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
	}
    }
}
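
The fragile part of the gcj variant is store ordering: while an object sits on the free list, its first word holds the link; once allocated, that same word must hold the vtable-like pointer through which the marker finds the mark descriptor. The sketch below (assumed names, not library code) isolates the two stores whose required order the comments above explain.

/* Step 1 must precede step 2: once the descriptor pointer overwrites
 * the link word, the object must no longer be reachable from the free
 * list, or a marker that stops this thread in between could read a
 * free-list link where it expects a mark descriptor. */
static void publish_gcj_object(void *volatile *free_list_slot,
                               void *next, void *obj, void *vtable)
{
    *free_list_slot = next;           /* 1: unlink from the free list  */
    *(void *volatile *)obj = vtable;  /* 2: then install the descriptor */
}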
Example #4
  GC_API void * GC_CALL GC_finalized_malloc(size_t client_lb,
                                const struct GC_finalizer_closure *fclos)
  {
    size_t lb = client_lb + sizeof(void *);
    size_t lg = ROUNDED_UP_GRANULES(lb);
    GC_tlfs tsd;
    void *result;
    void **tiny_fl, **my_fl, *my_entry;
    void *next;

    if (EXPECT(lg >= GC_TINY_FREELISTS, FALSE))
        return GC_core_finalized_malloc(client_lb, fclos);

    tsd = GC_getspecific(GC_thread_key);
    tiny_fl = tsd->finalized_freelists;
    my_fl = tiny_fl + lg;
    my_entry = *my_fl;
    while (EXPECT((word)my_entry
                  <= DIRECT_GRANULES + GC_TINY_FREELISTS + 1, FALSE)) {
        if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = (ptr_t)my_entry + lg + 1;
            return GC_core_finalized_malloc(client_lb, fclos);
        } else {
            GC_generic_malloc_many(GC_RAW_BYTES_FROM_INDEX(lg),
                                   GC_finalized_kind, my_fl);
            my_entry = *my_fl;
            if (my_entry == 0) {
                return (*GC_get_oom_fn())(lb);
            }
        }
    }

    next = obj_link(my_entry);
    result = (void *)my_entry;
    *my_fl = next;
    obj_link(result) = 0;
    ((const void **)result)[GRANULES_TO_WORDS(lg) - 1] = fclos;
    PREFETCH_FOR_WRITE(next);
    return result;
  }
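
GC_finalized_malloc smuggles the finalizer closure into the object itself: the request is padded by one pointer, rounded up to whole granules, and fclos lands in the object's last word, exactly where ((const void **)result)[GRANULES_TO_WORDS(lg) - 1] points. A minimal sketch of that arithmetic, with an assumed granule size:

#include <stddef.h>

#define GRANULE_BYTES 16  /* assumption; the real value is platform-dependent */

/* Index of the word that receives the finalizer closure, i.e. the
 * last word of the granule-rounded object. */
static size_t closure_word_index(size_t client_lb)
{
    size_t lb = client_lb + sizeof(void *);  /* room for the closure */
    size_t granules = (lb + GRANULE_BYTES - 1) / GRANULE_BYTES;
    return granules * (GRANULE_BYTES / sizeof(void *)) - 1;
}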
Example #5
File: boehm_gc.c Project: phs75/gap
/****************************************************************************
**
*F  AllocateBagMemory( <gc_type>, <type>, <size> )
**
**  Allocate memory for a new bag.
**
**  'AllocateBagMemory' is an auxiliary routine for the Boehm GC that
**  allocates memory from the appropriate pool. 'gc_type' is -1 if all words
**  in the bag can refer to other bags, 0 if the bag will not contain any
**  references to other bags, and > 0 to indicate a specific memory layout
**  descriptor.
**/
void * AllocateBagMemory(int gc_type, int type, UInt size)
{
    assert(gc_type >= -1);
    void * result = NULL;
    if (size <= TL_GC_SIZE) {
        UInt alloc_seg, alloc_size;
        alloc_size = (size + GRANULE_SIZE - 1) / GRANULE_SIZE;
        alloc_seg = TLAllocatorSeg[alloc_size];
        alloc_size = TLAllocatorSize[alloc_seg];
        void *** freeList = STATE(FreeList);
        if (!freeList[gc_type + 1]) {
            freeList[gc_type + 1] =
                GC_malloc(sizeof(void *) * TLAllocatorMaxSeg);
        }
        void ** freeListForType = freeList[gc_type + 1];
        result = freeListForType[alloc_seg];
        if (!result) {
            if (gc_type < 0)
                freeListForType[alloc_seg] = GC_malloc_many(alloc_size);
            else
                GC_generic_malloc_many(alloc_size, GCMKind[gc_type],
                                       &freeListForType[alloc_seg]);
            result = freeListForType[alloc_seg];
        }
        freeListForType[alloc_seg] = *(void **)result;
        memset(result, 0, alloc_size);
    }
    else {
        if (gc_type >= 0)
            result = GC_generic_malloc(size, GCKind[gc_type]);
        else
            result = GC_malloc(size);
    }
    if (TabFreeFuncBags[type])
        GC_register_finalizer_no_order(result, StandardFinalizer, NULL, NULL,
                                       NULL);
    return result;
}
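
A usage sketch of the gc_type convention documented in the header comment above; T_EXAMPLE is a placeholder type number, not a real GAP constant.

enum { T_EXAMPLE = 1 };  /* placeholder bag type, for illustration only */

void ExampleAllocations(void)
{
    /* every word of the bag may reference other bags */
    void *conservative = AllocateBagMemory(-1, T_EXAMPLE, 64);
    /* the bag holds no references at all (pointer-free) */
    void *atomic = AllocateBagMemory(0, T_EXAMPLE, 64);
    /* layout described by registered memory descriptor #2 */
    void *described = AllocateBagMemory(2, T_EXAMPLE, 64);
    (void)conservative; (void)atomic; (void)described;
}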