Esempio n. 1
0
/* Debug-mode wrapper for GC_register_finalizer_no_order: translates the
 * (possibly interior) user pointer to its base address, wraps fn/cd in a
 * debug closure, and reports the previously registered finalizer pair
 * through *ofn/*ocd when those are non-NULL. */
GC_API void GC_CALL GC_debug_register_finalizer_no_order
                                    (void * obj, GC_finalization_proc fn,
                                     void * cd, GC_finalization_proc *ofn,
                                     void * *ocd)
{
    GC_finalization_proc prev_fn = OFN_UNSET;
    void * prev_cd;
    ptr_t base = GC_base(obj);

    if (0 == base) {
        /* Not a heap object: it is never collected, so no finalizer
         * could ever run.  Report "no previous finalizer". */
        if (ofn) *ofn = 0;
        if (ocd) *ocd = 0;
        return;
    }
    if ((ptr_t)obj - base != sizeof(oh)) {
        GC_err_printf("GC_debug_register_finalizer_no_order called with "
                      "non-base-pointer %p\n",
                      obj);
    }
    if (fn != 0) {
        cd = GC_make_closure(fn, cd);
        if (0 == cd) return; /* out of memory */
        GC_register_finalizer_no_order(base, GC_debug_invoke_finalizer,
                                       cd, &prev_fn, &prev_cd);
    } else {
        /* fn == 0 means "unregister any existing finalizer". */
        GC_register_finalizer_no_order(base, 0, 0, &prev_fn, &prev_cd);
    }
    store_old(obj, prev_fn, (struct closure *)prev_cd, ofn, ocd);
}
Esempio n. 2
0
/* Create a test object whose finalization is registered according to
 * 'model': 0 = explicit no-order finalizer, 1 = GC_finalized_malloc with
 * the fclos descriptor, 2 = no finalizer at all.  Exits the process on an
 * unknown model or on out-of-memory. */
testobj_t testobj_new(int model)
{
    testobj_t obj = NULL;

    if (model == 0) {
        obj = GC_MALLOC(sizeof(struct testobj_s));
        if (obj != NULL)
            GC_register_finalizer_no_order(obj, testobj_finalize,
                                           &free_count, NULL, NULL);
    } else if (model == 1) {
        obj = GC_finalized_malloc(sizeof(struct testobj_s), &fclos);
    } else if (model == 2) {
        obj = GC_MALLOC(sizeof(struct testobj_s));
    } else {
        exit(-1); /* unknown model */
    }
    if (NULL == obj) {
        fprintf(stderr, "Out of memory!\n");
        exit(3);
    }
    /* Freshly allocated GC memory is zero-filled. */
    my_assert(obj->i == 0 && obj->keep_link == NULL);
    obj->i = 109;
    return obj;
}
Esempio n. 3
0
/* Debug-mode wrapper for GC_register_finalizer_no_order: translates the
 * user pointer to its base address, wraps fn/cd in a debug closure, and
 * reports the previously registered finalizer through *ofn/*ocd.
 *
 * Fixes vs. the previous revision:
 *  - GC_make_closure can return 0 on out of memory; registering
 *    GC_debug_invoke_finalizer with a NULL closure would crash when the
 *    finalizer runs, so we now bail out without registering.
 *  - When obj is not a heap pointer we now clear *ofn/*ocd (if supplied)
 *    instead of leaving the caller's outputs indeterminate. */
void GC_debug_register_finalizer_no_order
    				    (void * obj, GC_finalization_proc fn,
    				     void * cd, GC_finalization_proc *ofn,
				     void * *ocd)
{
    GC_finalization_proc my_old_fn;
    void * my_old_cd;
    ptr_t base = GC_base(obj);

    if (0 == base) {
        /* We won't collect it, hence the finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        return;
    }
    if ((ptr_t)obj - base != sizeof(oh)) {
        GC_err_printf(
	  "GC_debug_register_finalizer_no_order called with "
	  "non-base-pointer %p\n",
	  obj);
    }
    if (0 == fn) {
      GC_register_finalizer_no_order(base, 0, 0, &my_old_fn, &my_old_cd);
    } else {
      cd = GC_make_closure(fn, cd);
      if (cd == 0) return; /* out of memory; finalizer not registered */
      GC_register_finalizer_no_order(base, GC_debug_invoke_finalizer,
				     cd, &my_old_fn, &my_old_cd);
    }
    store_old(obj, my_old_fn, (struct closure *)my_old_cd, ofn, ocd);
}
Esempio n. 4
0
/* Allocate 'length' bytes from the BDW collector, register the matching
 * heap-statistics finalizer, periodically force a collection, and update
 * the allocation/live-byte counters.  'isArray' selects between the array
 * and object accounting groups.  Returns NULL on out of memory.
 *
 * Fix: the GC_malloc result was previously unchecked — on OOM the old code
 * registered a finalizer on NULL and still bumped the statistics. */
void *heapstats_alloc2(jsize length, jint isArray) {
  void *result;
  stat_t ttl;
  static int skipped=0;
  FLEX_MUTEX_DECLARE_STATIC(skipped_lock);

  /* do the allocation & register finalizer */
  result = GC_malloc(length);
  if (result == NULL)
    return NULL; /* out of memory: nothing to finalize or account for */
  GC_register_finalizer_no_order(result, isArray ?
				 heapstats_finalizer_array :
				 heapstats_finalizer_object,
				 (GC_PTR) ((ptroff_t) length), NULL, NULL);
  /* (sometimes) collect all dead objects */
  FLEX_MUTEX_LOCK(&skipped_lock);
  if (skipped ||
      0 == ((FETCH_STATS(heap_total_alloc_obj_count) +
	     FETCH_STATS(heap_total_alloc_arr_count)) % GC_FREQUENCY)) {
    /* Collect only when this allocation would push the current live size
     * past the recorded maximum; otherwise remember we skipped a
     * collection point so the next call re-checks. */
    if (isArray ?
	(FETCH_STATS(heap_current_live_arr_bytes)+length)
	> FETCH_STATS(heap_max_live_arr_bytes) :
	(FETCH_STATS(heap_current_live_obj_bytes)+length)
	> FETCH_STATS(heap_max_live_obj_bytes)) {
      GC_gcollect(); skipped = 0;
    } else {
      skipped = 1;
    }
  }
  FLEX_MUTEX_UNLOCK(&skipped_lock);
  if (isArray) {
    /* update total and current live */
    INCREMENT_STATS(heap_total_alloc_arr_count, 1);
    INCREMENT_STATS(heap_total_alloc_arr_bytes, length);
    INCREMENT_STATS(heap_current_live_arr_bytes, length);
    /* update max_live */
    ttl = FETCH_STATS(heap_current_live_arr_bytes);
    UPDATE_STATS(heap_max_live_arr_bytes,
		 ttl > _old_value_ ? ttl : _old_value_);
  } else {
    /* update total and current live */
    INCREMENT_STATS(heap_total_alloc_obj_count, 1);
    INCREMENT_STATS(heap_total_alloc_obj_bytes, length);
    INCREMENT_STATS(heap_current_live_obj_bytes, length);
    /* update max_live */
    ttl = FETCH_STATS(heap_current_live_obj_bytes);
    UPDATE_STATS(heap_max_live_obj_bytes,
		 ttl > _old_value_ ? ttl : _old_value_);
  }
  /* done */
  return result;
}
Esempio n. 5
0
File: boehm_gc.c Project: phs75/gap
/****************************************************************************
**
*F  AllocateBagMemory( <gc_type>, <type>, <size> )
**
**  Allocate memory for a new bag.
**
**  'AllocateBagMemory' is an auxiliary routine for the Boehm GC that
**  allocates memory from the appropriate pool. 'gc_type' is -1 if all words
**  in the bag can refer to other bags, 0 if the bag will not contain any
**  references to other bags, and > 0 to indicate a specific memory layout
**  descriptor.
**/
/* Allocate zeroed memory for a new bag.  Small requests (<= TL_GC_SIZE)
 * are served from the thread-local free list for the given gc_type
 * (refilled via GC_malloc_many / GC_generic_malloc_many); larger ones go
 * straight to the global heap.  If the bag's type has a free function
 * registered, a finalizer is attached to the new block. */
void * AllocateBagMemory(int gc_type, int type, UInt size)
{
    void * mem = NULL;
    assert(gc_type >= -1);
    if (size > TL_GC_SIZE) {
        /* Large request: bypass the thread-local pools entirely. */
        if (gc_type >= 0)
            mem = GC_generic_malloc(size, GCKind[gc_type]);
        else
            mem = GC_malloc(size);
    }
    else {
        /* Round up to granules, then map to the allocator segment and
         * the actual byte size that segment hands out. */
        UInt seg, nbytes;
        nbytes = (size + GRANULE_SIZE - 1) / GRANULE_SIZE;
        seg = TLAllocatorSeg[nbytes];
        nbytes = TLAllocatorSize[seg];
        void *** freeLists = STATE(FreeList);
        /* Lazily create the per-gc_type free-list array (slot 0 is
         * reserved for gc_type == -1). */
        if (!freeLists[gc_type + 1]) {
            freeLists[gc_type + 1] =
                GC_malloc(sizeof(void *) * TLAllocatorMaxSeg);
        }
        void ** list = freeLists[gc_type + 1];
        mem = list[seg];
        if (!mem) {
            /* Free list empty: refill it with a linked chain of blocks. */
            if (gc_type < 0)
                list[seg] = GC_malloc_many(nbytes);
            else
                GC_generic_malloc_many(nbytes, GCMKind[gc_type],
                                       &list[seg]);
            mem = list[seg];
        }
        /* Pop the head block and clear it for the caller. */
        list[seg] = *(void **)mem;
        memset(mem, 0, nbytes);
    }
    if (TabFreeFuncBags[type])
        GC_register_finalizer_no_order(mem, StandardFinalizer, NULL, NULL,
                                       NULL);
    return mem;
}
Esempio n. 6
0
/* Attach gc_finalize as a no-order finalizer to obj.  A no-op when the
 * build does not use libgc. */
void gc_register_finalizer(void* obj) {
#ifdef WITH_LIBGC
	GC_register_finalizer_no_order(obj, &gc_finalize, NULL, NULL, NULL);
#else
	(void)obj; /* unused without libgc */
#endif
}
Esempio n. 7
0
/* Lazily attach an "inflated" side-structure to wrapped_obj, holding
 * per-object state (hashcode, mutex/condvar, JNI data lock) that does not
 * fit in the object header.  Serialized by global_inflate_mutex and
 * re-checks the inflated flag under the lock, so a racing inflater is
 * handled safely.  The storage strategy and finalizer registration vary
 * with the configured GC / threading / transaction build options. */
void FNI_InflateObject(JNIEnv *env, jobject wrapped_obj) {
  struct oobj *obj = FNI_UNWRAP_MASKED(wrapped_obj);
  FLEX_MUTEX_LOCK(&global_inflate_mutex);
  /* be careful in case someone inflates this guy while our back is turned */
  if (!FNI_IS_INFLATED(wrapped_obj)) {
    /* all data in inflated_oobj is managed manually, so we can use malloc */
    /* Transactional BDW builds must allocate from the (uncollectable) GC
     * heap instead of malloc, since version info is stored here. */
    struct inflated_oobj *infl = 
#if defined(WITH_TRANSACTIONS) && defined(BDW_CONSERVATIVE_GC)
#ifdef WITH_GC_STATS
      GC_malloc_uncollectable_stats
#else
      GC_malloc_uncollectable /* transactions stores version info here */
#endif
#else
	malloc
#endif
      (sizeof(*infl));
#if (!defined(WITH_TRANSACTIONS)) || (!defined(BDW_CONSERVATIVE_GC))
    INCREMENT_MEM_STATS(sizeof(*infl));
#endif
    /* initialize infl */
    memset(infl, 0, sizeof(*infl));
#ifndef WITH_HASHLOCK_SHRINK
    /* Preserve the original hashcode before the header word is repurposed
     * to point at the inflated structure. */
    infl->hashcode = HASHCODE_MASK(obj->hashunion.hashcode);
#endif /* !WITH_HASHLOCK_SHRINK */
#if WITH_HEAVY_THREADS || WITH_PTH_THREADS || WITH_USER_THREADS
# ifdef ERROR_CHECKING_LOCKS
    /* error checking locks are slower, but catch more bugs (maybe) */
    { pthread_mutexattr_t attr; pthread_mutexattr_init(&attr);
      pthread_mutexattr_setkind_np(&attr, PTHREAD_MUTEX_ERRORCHECK_NP);
      pthread_mutex_init(&(infl->mutex), &attr);
      pthread_mutexattr_destroy(&attr);
    }
# else /* !ERROR_CHECKING_LOCKS */
    pthread_mutex_init(&(infl->mutex), NULL);
# endif /* ERROR_CHECKING_LOCKS || !ERROR_CHECKING_LOCKS */
    pthread_cond_init(&(infl->cond), NULL);
    pthread_rwlock_init(&(infl->jni_data_lock), NULL);
#endif
    /* Publish the inflated structure: either in the object header word or
     * in a side table, depending on build options. */
#ifndef WITH_HASHLOCK_SHRINK
#ifndef WITH_DYNAMIC_WB
    obj->hashunion.inflated = infl;
#else /* WITH_DYNAMIC_WB */
    /* NOTE(review): bit 1 of the hashcode appears to carry the dynamic
     * write-barrier flag and is folded into the pointer — confirm. */
    obj->hashunion.inflated = (struct inflated_oobj *) 
	((ptroff_t) infl | (obj->hashunion.hashcode & 2));
#endif /* WITH_DYNAMIC_WB */
#else /* WITH_HASHLOCK_SHRINK */
    infl_table_set(INFL_LOCK, obj, infl, NULL);
#endif /* WITH_HASHLOCK_SHRINK */
    assert(FNI_IS_INFLATED(wrapped_obj));
    /* Arrange for deflate_object to run when the object dies, so the
     * manually-managed inflated structure is reclaimed. */
#ifdef WITH_PRECISE_GC 
#if defined(WITH_REALTIME_JAVA) && defined(WITH_NOHEAP_SUPPORT)
    /* Can't inflate a heap reference in a NoHeapRealtimeThread */
    assert((!(((ptroff_t)FNI_UNWRAP(wrapped_obj))&1))||
	   (!((struct FNI_Thread_State*)env)->noheap));
    if (((ptroff_t)FNI_UNWRAP(wrapped_obj))&1)  /* register only if in heap */
#endif
      precise_register_inflated_obj(obj, 
#ifdef WITH_REALTIME_JAVA
				    (void (*)(jobject_unwrapped, ptroff_t))
#endif
				    deflate_object);
#elif defined(BDW_CONSERVATIVE_GC)
    /* register finalizer to deallocate inflated_oobj on gc */
    /* client data is the offset of obj from its base allocation, so the
     * finalizer can recover the interior pointer. */
    if (GC_base(obj)!=NULL) {// skip if this is not a heap-allocated object
        GC_register_finalizer_no_order(GC_base(obj), deflate_object,
			      (GC_PTR) ((void*)obj-(void*)GC_base(obj)),
			      &(infl->old_finalizer),
			      &(infl->old_client_data));
    } 
#endif
#ifdef WITH_REALTIME_JAVA
# ifdef BDW_CONSERVATIVE_GC /* XXX this test may be reversed? see v1.29 XXX */
    if (GC_base(obj)!=NULL) /* skip if this is not a heap-allocated object */
# endif /* BDW_CONSERVATIVE_GC */
    RTJ_register_finalizer(wrapped_obj, deflate_object); 
#endif
  }
  FLEX_MUTEX_UNLOCK(&global_inflate_mutex);
}