Example #1
long scheme_count_closure(Scheme_Object **o, mzshort len, Scheme_Hash_Table *ht)
{
#if 0
  int i;
  int s = 0;

  for (i = 0; i < len; i++) {
    if (!scheme_lookup_in_table(ht, (const char *)o[i])) {
      scheme_hash_set(ht, o[i], scheme_true);
      if (GC_size(o[i]) == sizeof(Scheme_Object *)) {
	/* May be an environment box */
	Scheme_Object *d = *(Scheme_Object **)o[i];
	if (GC_size(d) >= sizeof(Scheme_Type)) {
	  /* Ok - probably it is a box. */
	  s += sizeof(Scheme_Object *);
	  s += scheme_count_memory(d, ht);
	} else {
	  /* Not an environment box. */
	  s += scheme_count_memory(o[i], ht);
	}
      } else {
	s += scheme_count_memory(o[i], ht);
      }
    }
  }

  return s;
#endif
  return 0;
}
Example #2
/* This version assumes we do hold the allocation lock.		   */
ptr_t GC_store_debug_info_inner(ptr_t p, word sz, char *string, word integer)
{
    register word * result = (word *)((oh *)p + 1);
    
    /* There is some argument that we should disable signals here.	*/
    /* But that's expensive.  And this way things should only appear	*/
    /* inconsistent while we're in the handler.				*/
    GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
    GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK(p, sz)));
#   ifdef KEEP_BACK_PTRS
      ((oh *)p) -> oh_back_ptr = HIDE_BACK_PTR(NOT_MARKED);
#   endif
#   ifdef MAKE_BACK_GRAPH
      ((oh *)p) -> oh_bg_ptr = HIDE_BACK_PTR((ptr_t)0);
#   endif
    ((oh *)p) -> oh_string = string;
    ((oh *)p) -> oh_int = integer;
#   ifndef SHORT_DBG_HDRS
      ((oh *)p) -> oh_sz = sz;
      ((oh *)p) -> oh_sf = START_FLAG ^ (word)result;
      ((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
         result[SIMPLE_ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
#   endif
    return((ptr_t)result);
}
Example #3
void GC_debug_free(void * p)
{
    ptr_t base;
    ptr_t clobbered;
    
    if (0 == p) return;
    base = GC_base(p);
    if (base == 0) {
        GC_err_printf("Attempt to free invalid pointer %p\n", p);
        ABORT("free(invalid pointer)");
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf(
        	  "GC_debug_free called on pointer %p wo debugging info\n", p);
    } else {
#     ifndef SHORT_DBG_HDRS
        clobbered = GC_check_annotated_obj((oh *)base);
        if (clobbered != 0) {
          if (((oh *)base) -> oh_sz == GC_size(base)) {
            GC_err_printf(
                  "GC_debug_free: found previously deallocated (?) object at ");
          } else {
            GC_err_printf("GC_debug_free: found smashed location at ");
          }
          GC_print_smashed_obj(p, clobbered);
        }
        /* Invalidate size */
        ((oh *)base) -> oh_sz = GC_size(base);
#     endif /* SHORT_DBG_HDRS */
    }
    if (GC_find_leak) {
        GC_free(base);
    } else {
	hdr * hhdr = HDR(p);
	GC_bool uncollectable = FALSE;

        if (hhdr ->  hb_obj_kind == UNCOLLECTABLE) {
	    uncollectable = TRUE;
	}
#	ifdef ATOMIC_UNCOLLECTABLE
	    if (hhdr ->  hb_obj_kind == AUNCOLLECTABLE) {
		    uncollectable = TRUE;
	    }
#	endif
	if (uncollectable) {
	    GC_free(base);
	} else {
	    size_t i;
	    size_t obj_sz = BYTES_TO_WORDS(hhdr -> hb_sz - sizeof(oh));

	    for (i = 0; i < obj_sz; ++i) ((word *)p)[i] = 0xdeadbeef;
	    GC_ASSERT((word *)p + i == (word *)(base + hhdr -> hb_sz));
	}
    } /* !GC_find_leak */
}
Example #4
GC_INNER int GC_clone_finalizer(void * src_p, void * dest_p)
{
    struct finalizable_object * curr_fo;
    size_t index;
    ptr_t base, result;
    GC_finalization_proc fn;
    ptr_t cd;
    finalization_mark_proc mp;
    DCL_LOCK_STATE;

    base = (ptr_t)src_p;
    result = (ptr_t)dest_p;
    LOCK();
    index = HASH2(base, log_fo_table_size);
    curr_fo = GC_fo_head[index];
    while (curr_fo != 0) {
      GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
      if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          fn = curr_fo -> fo_fn;
          cd = curr_fo -> fo_client_data;
          mp = curr_fo -> fo_mark_proc;
          UNLOCK();
          return GC_register_finalizer_inner(result, fn, cd, 0, 0, mp);
      }
      curr_fo = fo_next(curr_fo);
    }
    UNLOCK();
    return TRUE;
}
Example #5
void GC_default_print_heap_obj_proc(ptr_t p)
{
    ptr_t base = GC_base(p);

    GC_err_printf("start: %p, appr. length: %ld", base,
		  (unsigned long)GC_size(base));
}
Example #6
void * GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
{
    void * base = GC_base(p);
    ptr_t clobbered;
    void * result;
    size_t copy_sz = lb;
    size_t old_sz;
    hdr * hhdr;
    
    if (p == 0) return(GC_debug_malloc(lb, OPT_RA s, i));
    if (base == 0) {
        GC_err_printf("Attempt to reallocate invalid pointer %p\n", p);
        ABORT("realloc(invalid pointer)");
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf(
        	"GC_debug_realloc called on pointer %p wo debugging info\n", p);
        return(GC_realloc(p, lb));
    }
    hhdr = HDR(base);
    switch (hhdr -> hb_obj_kind) {
#    ifdef STUBBORN_ALLOC
      case STUBBORN:
        result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
        break;
#    endif
      case NORMAL:
        result = GC_debug_malloc(lb, OPT_RA s, i);
        break;
      case PTRFREE:
        result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
        break;
      case UNCOLLECTABLE:
	result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
 	break;
#    ifdef ATOMIC_UNCOLLECTABLE
      case AUNCOLLECTABLE:
	result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
	break;
#    endif
      default:
        GC_err_printf("GC_debug_realloc: encountered bad kind\n");
        ABORT("bad kind");
    }
#   ifdef SHORT_DBG_HDRS
      old_sz = GC_size(base) - sizeof(oh);
#   else
      clobbered = GC_check_annotated_obj((oh *)base);
      if (clobbered != 0) {
        GC_err_printf("GC_debug_realloc: found smashed location at ");
        GC_print_smashed_obj(p, clobbered);
      }
      old_sz = ((oh *)base) -> oh_sz;
#   endif
    if (old_sz < copy_sz) copy_sz = old_sz;
    if (result == 0) return(0);
    BCOPY(p, result,  copy_sz);
    GC_debug_free(p);
    return(result);
}
Example #7
  /* clobbered_addr.                                                    */
  STATIC void GC_print_smashed_obj(const char *msg, ptr_t p,
                                   ptr_t clobbered_addr)
  {
    oh * ohdr = (oh *)GC_base(p);

    GC_ASSERT(I_DONT_HOLD_LOCK());
#   ifdef LINT2
      if (!ohdr) ABORT("Invalid GC_print_smashed_obj argument");
#   endif
    if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
        || ohdr -> oh_string == 0) {
        GC_err_printf(
                "%s %p in or near object at %p(<smashed>, appr. sz = %lu)\n",
                msg, clobbered_addr, p,
                (unsigned long)(GC_size((ptr_t)ohdr) - DEBUG_BYTES));
    } else {
        GC_err_printf("%s %p in or near object at %p (%s:%d, sz=%lu)\n",
                msg, clobbered_addr, p,
                (word)(ohdr -> oh_string) < HBLKSIZE ? "(smashed string)" :
                ohdr -> oh_string[0] == '\0' ? "EMPTY(smashed?)" :
                                                ohdr -> oh_string,
                GET_OH_LINENUM(ohdr), (unsigned long)(ohdr -> oh_sz));
        PRINT_CALL_CHAIN(ohdr);
    }
  }
Example #8
static void return_single_freelist(void *fl, void **gfl)
{
    void *q, **qptr;

    if (*gfl == 0) {
      *gfl = fl;
    } else {
      GC_ASSERT(GC_size(fl) == GC_size(*gfl));
      /* Concatenate: */
        qptr = &(obj_link(fl));
        while ((word)(q = *qptr) >= HBLKSIZE)
          qptr = &(obj_link(q));
        GC_ASSERT(0 == q);
        *qptr = *gfl;
        *gfl = fl;
    }
}
Example #9
GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
{
    void * base;
    void * result;
    hdr * hhdr;
    if (p == 0)
      return(GC_debug_malloc(lb, OPT_RA s, i));

    base = GC_base(p);
    if (base == 0) {
        GC_err_printf("Attempt to reallocate invalid pointer %p\n", p);
        ABORT("Invalid pointer passed to realloc()");
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf(
              "GC_debug_realloc called on pointer %p w/o debugging info\n", p);
        return(GC_realloc(p, lb));
    }
    hhdr = HDR(base);
    switch (hhdr -> hb_obj_kind) {
#    ifdef STUBBORN_ALLOC
      case STUBBORN:
        result = GC_debug_malloc_stubborn(lb, OPT_RA s, i);
        break;
#    endif
      case NORMAL:
        result = GC_debug_malloc(lb, OPT_RA s, i);
        break;
      case PTRFREE:
        result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
        break;
      case UNCOLLECTABLE:
        result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
        break;
#    ifdef ATOMIC_UNCOLLECTABLE
      case AUNCOLLECTABLE:
        result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
        break;
#    endif
      default:
        result = NULL; /* initialized to prevent warning. */
        GC_err_printf("GC_debug_realloc: encountered bad kind\n");
        ABORT("Bad kind");
    }

    if (result != NULL) {
      size_t old_sz;
#     ifdef SHORT_DBG_HDRS
        old_sz = GC_size(base) - sizeof(oh);
#     else
        old_sz = ((oh *)base) -> oh_sz;
#     endif
      BCOPY(p, result, old_sz < lb ? old_sz : lb);
      GC_debug_free(p);
    }
    return(result);
}
Example #10
GC_API void GC_CALL GC_debug_free(void * p)
{
    ptr_t base;
    if (0 == p) return;

    base = GC_base(p);
    if (base == 0) {
      GC_err_printf("Attempt to free invalid pointer %p\n", p);
      ABORT("Invalid pointer passed to free()");
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
      GC_err_printf(
               "GC_debug_free called on pointer %p w/o debugging info\n", p);
    } else {
#     ifndef SHORT_DBG_HDRS
        ptr_t clobbered = GC_check_annotated_obj((oh *)base);
        word sz = GC_size(base);
        if (clobbered != 0) {
          GC_have_errors = TRUE;
          if (((oh *)base) -> oh_sz == sz) {
            GC_print_smashed_obj(
                  "GC_debug_free: found previously deallocated (?) object at",
                  p, clobbered);
            return; /* ignore double free */
          } else {
            GC_print_smashed_obj("GC_debug_free: found smashed location at",
                                 p, clobbered);
          }
        }
        /* Invalidate size (mark the object as deallocated) */
        ((oh *)base) -> oh_sz = sz;
#     endif /* SHORT_DBG_HDRS */
    }
    if (GC_find_leak
#       ifndef SHORT_DBG_HDRS
          && ((ptr_t)p - (ptr_t)base != sizeof(oh) || !GC_findleak_delay_free)
#       endif
        ) {
      GC_free(base);
    } else {
      hdr * hhdr = HDR(p);
      if (hhdr -> hb_obj_kind == UNCOLLECTABLE
#         ifdef ATOMIC_UNCOLLECTABLE
            || hhdr -> hb_obj_kind == AUNCOLLECTABLE
#         endif
          ) {
        GC_free(base);
      } else {
        size_t i;
        size_t obj_sz = BYTES_TO_WORDS(hhdr -> hb_sz - sizeof(oh));

        for (i = 0; i < obj_sz; ++i)
          ((word *)p)[i] = GC_FREED_MEM_MARKER;
        GC_ASSERT((word *)p + i == (word *)(base + hhdr -> hb_sz));
      }
    } /* !GC_find_leak */
}
Example #11
GC_INNER void GC_default_print_heap_obj_proc(ptr_t p)
{
    ptr_t base = (ptr_t)GC_base(p);
    int kind = HDR(base)->hb_obj_kind;

    GC_err_printf("object at %p of appr. %lu bytes (%s)\n",
                  (void *)base, (unsigned long)GC_size(base),
                  kind == PTRFREE ? "atomic" :
                    IS_UNCOLLECTABLE(kind) ? "uncollectable" : "composite");
}
Example #12
File: eval3.c Project: NV/nile
static oop arrayAt(oop array, int index)
{
  if (is(Array, array)) {
    oop elts= get(array, Array,_array);
    int size= GC_size(elts) / sizeof(oop);
    if ((unsigned)index < (unsigned)size)
      return ((oop *)elts)[index];
  }
  return nil;
}
Example #13
  /* Used internally; we assume it's called correctly.    */
  GC_INNER void GC_debug_free_inner(void * p)
  {
    ptr_t base = GC_base(p);
    GC_ASSERT((ptr_t)p - (ptr_t)base == sizeof(oh));
#   ifdef LINT2
      if (!base) ABORT("Invalid GC_debug_free_inner argument");
#   endif
#   ifndef SHORT_DBG_HDRS
      /* Invalidate size */
      ((oh *)base) -> oh_sz = GC_size(base);
#   endif
    GC_free_inner(base);
  }
Example #14
/* This version assumes we do hold the allocation lock.            */
STATIC ptr_t GC_store_debug_info_inner(ptr_t p, word sz, const char *string,
                                       int linenum)
{
    word * result = (word *)((oh *)p + 1);

    GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
    GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK(p, sz)));
#   ifdef KEEP_BACK_PTRS
      ((oh *)p) -> oh_back_ptr = HIDE_BACK_PTR(NOT_MARKED);
#   endif
#   ifdef MAKE_BACK_GRAPH
      ((oh *)p) -> oh_bg_ptr = HIDE_BACK_PTR((ptr_t)0);
#   endif
    ((oh *)p) -> oh_string = string;
    ((oh *)p) -> oh_int = (word)linenum;
#   ifndef SHORT_DBG_HDRS
      ((oh *)p) -> oh_sz = sz;
      ((oh *)p) -> oh_sf = START_FLAG ^ (word)result;
      ((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
         result[SIMPLE_ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
#   endif
    return((ptr_t)result);
}
Example #15
File: eval3.c Project: NV/nile
static oop arrayAtPut(oop array, int index, oop val)
{
  if (is(Array, array)) {
    oop elts= get(array, Array,_array);
    int size= GC_size(elts) / sizeof(oop);
    if ((unsigned)index >= (unsigned)size) {
      oop oops= _newOops(_Array, sizeof(oop) * (index + 1));
      memcpy((oop *)oops, (oop *)elts, size * sizeof(oop));
      elts= set(array, Array,_array, oops);
    }
    return ((oop *)elts)[index]= val;
  }
  return nil;
}
Example #16
static struct GC_ms_entry *
mark_weak_value_table (GC_word *addr, struct GC_ms_entry *mark_stack_ptr,
                       struct GC_ms_entry *mark_stack_limit, GC_word env)
{
  scm_t_weak_entry *entries = (scm_t_weak_entry*) addr;
  unsigned long k, size = GC_size (addr) / sizeof (scm_t_weak_entry);

  for (k = 0; k < size; k++)
    if (entries[k].hash && entries[k].value)
      {
        SCM key = SCM_PACK (entries[k].key);
        mark_stack_ptr = GC_MARK_AND_PUSH ((GC_word*) SCM2PTR (key),
                                           mark_stack_ptr, mark_stack_limit,
                                           NULL);
      }

  return mark_stack_ptr;
}
Example #17
  STATIC void GC_print_blacklisted_ptr(word p, ptr_t source,
                                       const char *kind_str)
  {
    ptr_t base = (ptr_t)GC_base(source);

    if (0 == base) {
        GC_err_printf("Black listing (%s) %p referenced from %p in %s\n",
                      kind_str, (void *)p, (void *)source,
                      NULL != source ? "root set" : "register");
    } else {
        /* FIXME: We can't call the debug version of GC_print_heap_obj  */
        /* (with PRINT_CALL_CHAIN) here because the lock is held and    */
        /* the world is stopped.                                        */
        GC_err_printf("Black listing (%s) %p referenced from %p in"
                      " object at %p of appr. %lu bytes\n",
                      kind_str, (void *)p, (void *)source,
                      (void *)base, (unsigned long)GC_size(base));
    }
  }
Example #18
  /* marked as deallocated.                                       */
  GC_INNER int GC_has_other_debug_info(ptr_t p)
  {
    ptr_t body = (ptr_t)((oh *)p + 1);
    word sz = GC_size(p);

    if (HBLKPTR(p) != HBLKPTR((ptr_t)body)
        || sz < DEBUG_BYTES + EXTRA_BYTES) {
      return 0;
    }
    if (((oh *)p) -> oh_sf != (START_FLAG ^ (word)body)
        && ((word *)p)[BYTES_TO_WORDS(sz)-1] != (END_FLAG ^ (word)body)) {
      return 0;
    }
    if (((oh *)p)->oh_sz == sz) {
      /* Object may have had debug info, but has been deallocated     */
      return -1;
    }
    return 1;
  }
Example #19
 /* address.                                           */
 STATIC ptr_t GC_check_annotated_obj(oh *ohdr)
 {
   ptr_t body = (ptr_t)(ohdr + 1);
   word gc_sz = GC_size((ptr_t)ohdr);
   if (ohdr -> oh_sz + DEBUG_BYTES > gc_sz) {
       return((ptr_t)(&(ohdr -> oh_sz)));
   }
   if (ohdr -> oh_sf != (START_FLAG ^ (word)body)) {
       return((ptr_t)(&(ohdr -> oh_sf)));
   }
   if (((word *)ohdr)[BYTES_TO_WORDS(gc_sz)-1] != (END_FLAG ^ (word)body)) {
       return((ptr_t)((word *)ohdr + BYTES_TO_WORDS(gc_sz)-1));
   }
   if (((word *)body)[SIMPLE_ROUNDED_UP_WORDS(ohdr -> oh_sz)]
       != (END_FLAG ^ (word)body)) {
       return((ptr_t)((word *)body + SIMPLE_ROUNDED_UP_WORDS(ohdr->oh_sz)));
   }
   return(0);
 }
Example #20
  GC_API void * GC_CALL GC_finalized_malloc(size_t lb,
                                const struct GC_finalizer_closure *fclos)
#endif
{
    ptr_t op;
    word lg;
    DCL_LOCK_STATE;

    lb += sizeof(void *);
    GC_ASSERT(done_init);
    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        LOCK();
        op = GC_finalized_objfreelist[lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            op = GC_generic_malloc(lb, GC_finalized_kind);
            if (NULL == op)
                return NULL;
            /* GC_generic_malloc has extended the size map for us.      */
            lg = GC_size_map[lb];
        } else {
            GC_finalized_objfreelist[lg] = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            UNLOCK();
        }
        GC_ASSERT(lg > 0);
        ((const void **)op)[GRANULES_TO_WORDS(lg) - 1] = fclos;
    } else {
        size_t op_sz;

        op = GC_generic_malloc(lb, GC_finalized_kind);
        if (NULL == op)
            return NULL;
        op_sz = GC_size(op);
        GC_ASSERT(op_sz >= lb);
        ((const void **)op)[op_sz / sizeof(void *) - 1] = fclos;
    }
    return GC_clear_stack(op);
}
Example #21
/* its part.							*/
GC_bool GC_has_other_debug_info(ptr_t p)
{
    register oh * ohdr = (oh *)p;
    register ptr_t body = (ptr_t)(ohdr + 1);
    register word sz = GC_size((ptr_t) ohdr);
    
    if (HBLKPTR((ptr_t)ohdr) != HBLKPTR((ptr_t)body)
        || sz < DEBUG_BYTES + EXTRA_BYTES) {
        return(FALSE);
    }
    if (ohdr -> oh_sz == sz) {
    	/* Object may have had debug info, but has been deallocated	*/
    	return(FALSE);
    }
    if (ohdr -> oh_sf == (START_FLAG ^ (word)body)) return(TRUE);
    if (((word *)ohdr)[BYTES_TO_WORDS(sz)-1] == (END_FLAG ^ (word)body)) {
        return(TRUE);
    }
    return(FALSE);
}
Example #22
STATIC int GC_CALLBACK GC_finalized_disclaim(void *obj)
{
    void **fc_addr;
    const struct GC_finalizer_closure *fc;

    fc_addr = &((void **)obj)[GC_size(obj) / sizeof(void *) - 1];
    fc = *fc_addr;
    if (fc != NULL) {
       /* [1] The disclaim function may be passed fragments from the    */
       /* free-list, on which it should not run finalization.           */
       /* To recognize this case, we use the fact that the first word   */
       /* on such fragments is always even (a link to the next          */
       /* fragment, or NULL).  If it is desirable to have a finalizer   */
       /* which does not use the first word for storing finalization    */
       /* info, GC_reclaim_with_finalization must be extended to clear  */
       /* fragments so that the assumption holds for the selected word. */
        (*fc->proc)(obj, fc->cd);
        *fc_addr = NULL;
    }
    return 0;
}
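Examples #20 and #22 are the two halves of bdwgc's finalized-object support: GC_finalized_malloc stores the finalizer closure in the last pointer-sized slot of the object (located with GC_size), and GC_finalized_disclaim reads that slot back through fc->proc and fc->cd when the object is reclaimed. Below is a minimal client-side sketch of how that API is typically used; the gc/gc_disclaim.h include path and the GC_init_finalized_malloc call are assumptions based on current bdwgc releases and do not appear in the excerpts above.

#include <stdio.h>
#include <gc/gc_disclaim.h>    /* assumed install path for the finalized-malloc API */

static int tag = 42;

/* Finalizer reached through fc->proc(obj, fc->cd) in GC_finalized_disclaim. */
static void GC_CALLBACK note_reclaimed(void *obj, void *client_data)
{
    printf("reclaimed %p (tag %d)\n", obj, *(int *)client_data);
}

static const struct GC_finalizer_closure fc = { .proc = note_reclaimed, .cd = &tag };

int main(void)
{
    GC_INIT();
    GC_init_finalized_malloc();              /* assumed initializer for GC_finalized_kind */
    void *p = GC_finalized_malloc(64, &fc);  /* closure ends up in the object's last word */
    printf("allocated %p\n", p);
    p = NULL;                                /* drop the only reference ...               */
    GC_gcollect();                           /* ... so a later collection may reclaim it  */
    return 0;
}

Note that the finalizer only runs when a collection actually reclaims the object, so the message may not appear before the program exits.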
Example #23
void GC_print_smashed_obj(ptr_t p, ptr_t clobbered_addr)
{
    register oh * ohdr = (oh *)GC_base(p);
    
    GC_ASSERT(I_DONT_HOLD_LOCK());
    GC_err_printf("%p in object at %p(", clobbered_addr, p);
    if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
        || ohdr -> oh_string == 0) {
        GC_err_printf("<smashed>, appr. sz = %ld)\n",
        	       (GC_size((ptr_t)ohdr) - DEBUG_BYTES));
    } else {
        if (ohdr -> oh_string[0] == '\0') {
            GC_err_puts("EMPTY(smashed?)");
        } else {
            GC_err_puts(ohdr -> oh_string);
        }
        GC_err_printf(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
        			          (unsigned long)(ohdr -> oh_sz));
        PRINT_CALL_CHAIN(ohdr);
    }
}
Example #24
/* finalized when this finalizer is invoked.                    */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    GC_oom_func oom_fn;
    DCL_LOCK_STATE;

    LOCK();
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &log_fo_table_size);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1 << (unsigned)log_fo_table_size);
    }
    /* in the THREADS case we hold allocation lock.             */
    base = (ptr_t)obj;
    for (;;) {
      index = HASH2(base, log_fo_table_size);
      prev_fo = 0;
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for base. */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
            }
          }
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
        break;
      }
      if (fn == 0) {
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(base, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory.  *ocd and *ofn remain unchanged.  */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    UNLOCK();
}
Example #25
File: eval3.c Project: NV/nile
static int arrayLength(oop obj)
{
  if (is(Array, obj))
    return GC_size(get(obj, Array,_array)) / sizeof(oop);
  return 0;
}
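Examples #12, #15, and #25 all rely on the same idiom: rather than storing an explicit capacity, eval3.c derives it from the collector as GC_size(elts) / sizeof(oop). A self-contained sketch of that idiom using plain bdwgc calls follows; capacity_of and grow_buffer are illustrative names rather than functions from eval3.c, and keep in mind that GC_size reports the usable size of the block, which may exceed what was requested.

#include <stdio.h>
#include <string.h>
#include <gc/gc.h>

/* Illustrative helper: how many pointer slots fit in a GC-allocated buffer. */
static size_t capacity_of(void **buf)
{
    return GC_size(buf) / sizeof(void *);
}

/* Illustrative helper: make sure index is in range, growing the buffer like arrayAtPut. */
static void **grow_buffer(void **buf, size_t index)
{
    size_t old_cap = capacity_of(buf);
    if (index < old_cap)
        return buf;
    void **bigger = (void **)GC_MALLOC((index + 1) * sizeof(void *));
    memcpy(bigger, buf, old_cap * sizeof(void *));
    return bigger;
}

int main(void)
{
    GC_INIT();
    void **buf = (void **)GC_MALLOC(4 * sizeof(void *));
    printf("requested 4 slots, capacity_of reports %lu\n",
           (unsigned long)capacity_of(buf));
    buf = grow_buffer(buf, 10);
    printf("after growing to index 10: %lu slots\n",
           (unsigned long)capacity_of(buf));
    return 0;
}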
Example #26
/* enqueued for finalization.                                           */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_fnlz_roots.finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }

  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
Example #27
/* and invoke finalizers.						*/
void GC_finalize(void)
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    
  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers	*/
  /* from finalizable objects.						*/
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
	    GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still		*/
  /* unreachable.							*/
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
	    if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
	    }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.	*/
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will	*/
              /* see it.						*/
              curr_fo -> fo_hidden_base = 
              		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                 	curr_fo -> fo_object_size
              		+ sizeof(struct finalizable_object);
	    GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_finalize_now; 
  	 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
  	real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
  	if (!GC_is_marked(real_ptr)) {
  	    if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
  	        GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
  	    }
	    if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
		GC_set_mark_bit(real_ptr);
	    }
  	}
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_finalize_now;
        prev_fo = 0;
        while (curr_fo != 0) {
	  next_fo = fo_next(curr_fo);
	  if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
	    real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
	    if (!GC_is_marked(real_ptr)) {
	      GC_set_mark_bit(real_ptr);
	    } else {
	      if (prev_fo == 0)
		GC_finalize_now = next_fo;
	      else
		fo_set_next(prev_fo, next_fo);

              curr_fo -> fo_hidden_base =
              		(word) HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                 	curr_fo -> fo_object_size + sizeof(struct finalizable_object);

	      i = HASH2(real_ptr, log_fo_table_size);
	      fo_set_next (curr_fo, fo_head[i]);
	      GC_fo_entries++;
	      fo_head[i] = curr_fo;
	      curr_fo = prev_fo;
	    }
	  }
	  prev_fo = curr_fo;
	  curr_fo = next_fo;
        }
      }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}
Example #28
/* finalized when this finalizer is invoked.			*/
GC_API void GC_register_finalizer_inner(void * obj,
					GC_finalization_proc fn, void *cd,
					GC_finalization_proc *ofn, void **ocd,
					finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
	LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
    	GC_grow_table((struct hash_chain_entry ***)(&fo_head),
    		      &log_fo_table_size);
	if (GC_print_stats) {
	    GC_log_printf("Grew fo table to %u entries\n",
	    	          (1 << log_fo_table_size));
	}
    }
    /* in the THREADS case signals are disabled and we hold allocation	*/
    /* lock; otherwise neither is true.  Proceed carefully.		*/
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this	*/
            /* should be safe.  The client may see only *ocd	*/
            /* updated, but we'll declare that to be his	*/
            /* problem.						*/
            if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
                if (prev_fo == 0) {
                  fo_head[index] = fo_next(curr_fo);
                } else {
                  fo_set_next(prev_fo, fo_next(curr_fo));
                }
            if (fn == 0) {
                GC_fo_entries--;
                  /* May not happen if we get a signal.  But a high	*/
                  /* estimate will only make the table larger than	*/
                  /* necessary.						*/
#		if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                  GC_free((void *)curr_fo);
#		endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
		/* Reinsert it.  We deleted it first to maintain	*/
		/* consistency in the event of a signal.		*/
		if (prev_fo == 0) {
                  fo_head[index] = curr_fo;
                } else {
                  fo_set_next(prev_fo, curr_fo);
                }
            }
#	    ifdef THREADS
                UNLOCK();
#	    endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#	ifdef THREADS
            UNLOCK();
#	endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
      /* We won't collect it, hence finalizer wouldn't be run. */
#     ifdef THREADS
          UNLOCK();
#     endif
      return;
    }
    new_fo = (struct finalizable_object *)
    	GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
    if (EXPECT(0 == new_fo, FALSE)) {
#     ifdef THREADS
	UNLOCK();
#     endif
      new_fo = (struct finalizable_object *)
	      GC_oom_fn(sizeof(struct finalizable_object));
      if (0 == new_fo) {
	GC_finalization_failures++;
	return;
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
	LOCK();
#     endif
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
#   endif
}
Example #29
GC_API void * GC_CALL GC_debug_realloc(void * p, size_t lb, GC_EXTRA_PARAMS)
{
    void * base;
    void * result;
    hdr * hhdr;

    if (p == 0) {
      return GC_debug_malloc(lb, OPT_RA s, i);
    }
    if (0 == lb) /* and p != NULL */ {
      GC_debug_free(p);
      return NULL;
    }

#   ifdef GC_ADD_CALLER
      if (s == NULL) {
        GC_caller_func_offset(ra, &s, &i);
      }
#   endif
    base = GC_base(p);
    if (base == 0) {
        ABORT_ARG1("Invalid pointer passed to realloc()", ": %p", p);
    }
    if ((ptr_t)p - (ptr_t)base != sizeof(oh)) {
        GC_err_printf(
              "GC_debug_realloc called on pointer %p w/o debugging info\n", p);
        return(GC_realloc(p, lb));
    }
    hhdr = HDR(base);
    switch (hhdr -> hb_obj_kind) {
      case NORMAL:
        result = GC_debug_malloc(lb, OPT_RA s, i);
        break;
      case PTRFREE:
        result = GC_debug_malloc_atomic(lb, OPT_RA s, i);
        break;
      case UNCOLLECTABLE:
        result = GC_debug_malloc_uncollectable(lb, OPT_RA s, i);
        break;
#    ifdef GC_ATOMIC_UNCOLLECTABLE
      case AUNCOLLECTABLE:
        result = GC_debug_malloc_atomic_uncollectable(lb, OPT_RA s, i);
        break;
#    endif
      default:
        result = NULL; /* initialized to prevent warning. */
        ABORT_RET("GC_debug_realloc: encountered bad kind");
    }

    if (result != NULL) {
      size_t old_sz;
#     ifdef SHORT_DBG_HDRS
        old_sz = GC_size(base) - sizeof(oh);
#     else
        old_sz = ((oh *)base) -> oh_sz;
#     endif
      if (old_sz > 0)
        BCOPY(p, result, old_sz < lb ? old_sz : lb);
      GC_debug_free(p);
    }
    return(result);
}
Example #30
File: boehm_gc.c Project: phs75/gap
UInt ResizeBag(Bag bag, UInt new_size)
{
    UInt  type; /* type of the bag                 */
    UInt  flags;
    UInt  old_size; /* old size of the bag             */
    Bag * src;      /* source in copying               */
    UInt  alloc_size;

/* check the size                                                      */

#ifdef TREMBLE_HEAP
    CollectBags(0, 0);
#endif

    BagHeader * header = BAG_HEADER(bag);

    /* get type and old size of the bag                                    */
    type = header->type;
    flags = header->flags;
    old_size = header->size;

#ifdef COUNT_BAGS
    /* update the statistics                                               */
    InfoBags[type].sizeLive += new_size - old_size;
    InfoBags[type].sizeAll += new_size - old_size;
#endif
    SizeAllBags += new_size - old_size;

#ifndef DISABLE_GC
    alloc_size = GC_size(header);
    /* An alternative implementation would be to compare
     * new_size <= alloc_size in the following test in order
     * to avoid reallocations for alternating contractions
     * and expansions. However, typed allocation in the Boehm
     * GC stores layout information in the last word of a memory
     * block and we may accidentally overwrite this information,
     * because GC_size() includes that extraneous word when
     * returning the size of a memory block.
     *
     * This is technically a bug in GC_size(), but until and
     * unless there is an upstream fix, we'll do it the safe
     * way.
     */
    if (new_size <= old_size &&
        sizeof(BagHeader) + new_size >= alloc_size * 3 / 4) {
#else
    if (new_size <= old_size) {
#endif /* DISABLE_GC */

        /* change the size word                                            */
        header->size = new_size;
    }

    /* if the bag is enlarged                                              */
    else {
        alloc_size = sizeof(BagHeader) + new_size;
        if (new_size == 0)
            alloc_size++;
#ifndef DISABLE_GC
        header = AllocateBagMemory(TabMarkTypeBags[type], type, alloc_size);
#else
        header = calloc(1, alloc_size);
#endif

        header->type = type;
        header->flags = flags;
        header->size = new_size;

        // copy data and update the masterpointer
        src = PTR_BAG(bag);
        memcpy(DATA(header), src, old_size < new_size ? old_size : new_size);
        SET_PTR_BAG(bag, DATA(header));
    }
    /* return success                                                      */
    return 1;
}


/*****************************************************************************
** The following functions are either not required or not supported, so empty
** implementations are provided.
**
*/

void InitGlobalBag(Bag * addr, const Char * cookie)
{
}

void SwapMasterPoint(Bag bag1, Bag bag2)
{
    Obj * ptr1 = PTR_BAG(bag1);
    Obj * ptr2 = PTR_BAG(bag2);
    SET_PTR_BAG(bag1, ptr2);
    SET_PTR_BAG(bag2, ptr1);
}
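
The long comment in Example #30 turns on the fact that GC_size reports the size of the whole block handed out by the collector, not the size that was requested, which is why sizeof(BagHeader) + new_size is compared against the rounded-up alloc_size. A minimal sketch of that rounding is shown below; the exact figures printed depend on the platform and bdwgc version, so treat them as illustrative only.

#include <assert.h>
#include <stdio.h>
#include <gc/gc.h>

int main(void)
{
    GC_INIT();
    for (size_t req = 1; req <= 33; req += 8) {
        void *p = GC_MALLOC(req);
        size_t got = GC_size(p);   /* usable size of the whole block */
        assert(got >= req);        /* never smaller than requested   */
        printf("requested %2lu bytes, GC_size reports %2lu\n",
               (unsigned long)req, (unsigned long)got);
    }
    return 0;
}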