Example No. 1
GC_API void * GC_CALL GC_malloc_precise(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = (void **)&(GC_pobjfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, FALSE)) {
            UNLOCK();
            return (GENERAL_MALLOC((word)lb, PRECISE));
        }
        GC_ASSERT(0 == obj_link(op)
                  || ((word)obj_link(op)
                        <= (word)GC_greatest_plausible_heap_addr
                     && (word)obj_link(op)
                        >= (word)GC_least_plausible_heap_addr));
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return op;
    } else {
        return(GENERAL_MALLOC((word)lb, PRECISE));
    }
}
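
A note on the idiom shared by all of these fast paths: a free object's first word doubles as the link to the next free object, so the free list needs no side storage. A minimal stand-alone sketch of that idiom (the names here are hypothetical; the collector's obj_link() macro expands to essentially the first-word access below):

#include <stddef.h>

#define MY_OBJ_LINK(p) (*(void **)(p))  /* first word of a free object */

static void *free_list;

static void push_free(void *obj)
{
    MY_OBJ_LINK(obj) = free_list;       /* thread onto the list head */
    free_list = obj;
}

static void *pop_free(void)
{
    void *op = free_list;

    if (op != NULL) {
        free_list = MY_OBJ_LINK(op);    /* advance the head */
        MY_OBJ_LINK(op) = NULL;         /* clear the link, as above */
    }
    return op;
}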
Example No. 2
/* Allocate lb bytes for an object of kind k, assuming that the */
/* caller already holds the allocator lock:                     */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    if(SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        if( (op = *opp) == 0 ) {
            if (GC_size_map[lb] == 0) {
              if (!GC_is_initialized) GC_init();
              if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
              return(GC_generic_malloc_inner(lb, k));
            }
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lg, k);
            if (op == 0) goto out;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

out:
    return op;
}
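
The GC_size_map lookups above turn a request size in bytes into a free-list index in granules, with GC_extend_size_map filling table entries lazily on first use. A rough stand-alone sketch of that mapping; the granule size and table bound are illustrative, not the collector's actual configuration:

#include <stddef.h>

#define GRANULE_BYTES 16        /* assumed allocation granule */
#define MAX_SMALL     4096      /* assumed small-object bound */

static size_t size_map[MAX_SMALL + 1];  /* 0 means "not computed yet" */

static size_t granules_for(size_t lb)
{
    if (lb > MAX_SMALL) return 0;       /* caller takes the large path */
    if (size_map[lb] == 0)              /* lazy extension on first use */
        size_map[lb] = (lb + GRANULE_BYTES - 1) / GRANULE_BYTES + (lb == 0);
    return size_map[lb];
}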
Example No. 3
/* Build a free list for objects of size sz (in words) inside heap     */
/* block h.  Clear objects inside h if requested.  Add list to the end */
/* of the free list we build.  Return the new free list.  This could   */
/* be called without the allocator lock, if we ensure that there is no */
/* concurrent collection which might reclaim objects that we have not  */
/* yet allocated.                                                      */
GC_INNER ptr_t GC_build_fl(struct hblk *h, size_t sz, GC_bool clear,
                           ptr_t list)
{
  word *p, *prev;
  word *last_object;            /* points to last object in new hblk    */

  /* Do a few prefetches here, just because it's cheap.         */
  /* If we were more serious about it, these should go inside   */
  /* the loops.  But write prefetches usually don't seem to     */
  /* matter much.                                               */
    PREFETCH_FOR_WRITE((ptr_t)h);
    PREFETCH_FOR_WRITE((ptr_t)h + 128);
    PREFETCH_FOR_WRITE((ptr_t)h + 256);
    PREFETCH_FOR_WRITE((ptr_t)h + 378);
  /* Handle small objects sizes more efficiently.  For larger objects   */
  /* the difference is less significant.                                */
#  ifndef SMALL_CONFIG
     switch (sz) {
        case 2: if (clear) {
                    return GC_build_fl_clear2(h, list);
                } else {
                    return GC_build_fl2(h, list);
                }
        case 4: if (clear) {
                    return GC_build_fl_clear4(h, list);
                } else {
                    return GC_build_fl4(h, list);
                }
        default:
                break;
     }
#  endif /* !SMALL_CONFIG */

  /* Clear the page if necessary. */
    if (clear) BZERO(h, HBLKSIZE);

  /* Add objects to free list */
    p = (word *)(h -> hb_body) + sz;    /* second object in *h  */
    prev = (word *)(h -> hb_body);              /* One object behind p  */
    last_object = (word *)((char *)h + HBLKSIZE);
    last_object -= sz;
                            /* Last place for last object to start */

  /* make a list of all objects in *h with head as last object */
    while (p <= last_object) {
      /* current object's link points to last object */
        obj_link(p) = (ptr_t)prev;
        prev = p;
        p += sz;
    }
    p -= sz;                    /* p now points to last object */

  /*
   * put p (which is now head of list of objects in *h) as first
   * pointer in the appropriate free list for this size.
   */
      obj_link(h -> hb_body) = list;
      return ((ptr_t)p);
}
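
Note the direction of the threading: objects are linked back-to-front, so the last object in the block becomes the head of the returned list, and the first object's link points at the caller-supplied list. A toy stand-alone version of that loop, with made-up sizes:

#include <stdio.h>

#define NOBJ      4     /* objects per toy "block" */
#define OBJ_WORDS 2     /* object size in words    */

int main(void)
{
    void *block[NOBJ * OBJ_WORDS] = {0};
    void *list = NULL;                      /* previously built list */
    void **prev = (void **)&block[0];
    void **p = (void **)&block[OBJ_WORDS];  /* second object */
    int i;

    for (i = 1; i < NOBJ; ++i) {            /* same threading as above */
        *p = (void *)prev;
        prev = p;
        p += OBJ_WORDS;
    }
    *(void **)&block[0] = list;             /* first object -> old list */
    printf("new head is the last object: %p\n", (void *)prev);
    return 0;
}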
Example No. 4
/* Allocate lb bytes of pointerful, traced, but not collectable data */
GC_API void * GC_CALL GC_malloc_uncollectable(size_t lb)
{
    void *op;
    void **opp;
    size_t lg;
    DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
        if (EXTRA_BYTES != 0 && lb != 0) lb--;
                  /* We don't need the extra byte, since this won't be  */
                  /* collected anyway.                                  */
        lg = GC_size_map[lb];
        opp = &(GC_uobjfreelist[lg]);
        LOCK();
        if( (op = *opp) != 0 ) {
            *opp = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            /* Mark bit was already set on free list.  It will be       */
            /* cleared only temporarily during a collection, as a       */
            /* result of the normal free list mark bit clearing.        */
            GC_non_gc_bytes += GRANULES_TO_BYTES(lg);
            UNLOCK();
        } else {
            UNLOCK();
            op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
            /* For small objects, the free lists are completely marked. */
        }
        GC_ASSERT(0 == op || GC_is_marked(op));
        return((void *) op);
    } else {
        hdr * hhdr;

        op = (ptr_t)GC_generic_malloc((word)lb, UNCOLLECTABLE);
        if (0 == op) return(0);

        GC_ASSERT(((word)op & (HBLKSIZE - 1)) == 0); /* large block */
        hhdr = HDR(op);
        /* We don't need the lock here, since we have an undisguised    */
        /* pointer.  We do need to hold the lock while we adjust        */
        /* mark bits.                                                   */
        LOCK();
        set_mark_bit_from_hdr(hhdr, 0); /* Only object. */
#       ifndef THREADS
          GC_ASSERT(hhdr -> hb_n_marks == 0);
                /* This is not guaranteed in the multi-threaded case    */
                /* because the counter could be updated before locking. */
#       endif
        hhdr -> hb_n_marks = 1;
        UNLOCK();
        return((void *) op);
    }
}
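
From the client's side, the uncollectable kind gives an object that is scanned for pointers (so it keeps what it references alive) but is never reclaimed by a collection, only by an explicit GC_free. A small usage sketch against the public gc.h API:

#include "gc.h"

struct cfg { void *cache; };

int main(void)
{
    struct cfg *c;

    GC_INIT();
    c = (struct cfg *)GC_malloc_uncollectable(sizeof(struct cfg));
    c->cache = GC_malloc(128);  /* stays live: c itself is traced */
    GC_gcollect();              /* reclaims neither object */
    GC_free(c);                 /* the only way to release c */
    return 0;
}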
Example No. 5
/* Allocate lb bytes for an object of kind k.  Should not be used to   */
/* directly allocate objects that require special handling on          */
/* allocation.                                                         */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    GC_ASSERT(I_HOLD_LOCK());
    if(SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
            if (lg == 0) {
                if (!EXPECT(GC_is_initialized, TRUE)) {
                    DCL_LOCK_STATE;
                    UNLOCK(); /* just to unset GC_lock_holder */
                    GC_init();
                    LOCK();
                    lg = GC_size_map[lb];
                }
                if (0 == lg) {
                    GC_extend_size_map(lb);
                    lg = GC_size_map[lb];
                    GC_ASSERT(lg != 0);
                }
                /* Retry */
                opp = &(kind -> ok_freelist[lg]);
                op = *opp;
            }
            if (0 == op) {
                if (0 == kind -> ok_reclaim_list &&
                        !GC_alloc_reclaim_list(kind))
                    return NULL;
                op = GC_allocobj(lg, k);
                if (0 == op)
                    return NULL;
            }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

    return op;
}
Example No. 6
GC_API void * GC_generic_malloc_kind(size_t bytes, int kind)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_generic_malloc_kind_global(bytes, kind);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_generic_malloc_kind_global(bytes, kind);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    tiny_fl = ((GC_tlfs)tsd)->freelists[kind];
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
        kind, GC_generic_malloc_kind_global(bytes, kind), obj_link(result) = 0);
    return result;
}
Example No. 7
GC_INNER void GC_free_inner(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* bytes */
    size_t ngranules;  /* sz in granules */
    void ** flh;
    int knd;
    struct obj_kind * ok;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    ok = &GC_obj_kinds[knd];
    if (ngranules <= MAXOBJGRANULES) {
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
    }
}
Example No. 8
void * GC_gcj_malloc_ignore_off_page(size_t lb,
				     void * ptr_to_struct_containing_descr) 
{
    ptr_t op;
    ptr_t * opp;
    word lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
	lg = GC_size_map[lb];
	opp = &(GC_gcjobjfreelist[lg]);
	LOCK();
        if( (op = *opp) == 0 ) {
	    maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
	    if (0 == op) {
		UNLOCK();
		return(GC_oom_fn(lb));	/* don't deref a failed allocation */
	    }
	    lg = GC_size_map[lb];	/* May have been uninitialized.	*/
        } else {
            *opp = obj_link(op);
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        }
	*(void **)op = ptr_to_struct_containing_descr;
	UNLOCK();
    } else {
	LOCK();
	maybe_finalize();
        op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
        if (0 != op) {
          *(void **)op = ptr_to_struct_containing_descr;
	}
        UNLOCK();
    }
    return((void *) op);
}
Example No. 9
/* Similar to GC_gcj_malloc, but the size is in words, and we don't	*/
/* adjust it.  The size is assumed to be such that it can be		*/
/* allocated as a small object.						*/
void * GC_gcj_fast_malloc(size_t lw, void * ptr_to_struct_containing_descr)
{
    ptr_t op;
    ptr_t * opp;
    DCL_LOCK_STATE;

    opp = &(GC_gcjobjfreelist[lw]);
    LOCK();
    op = *opp;
    if( EXPECT(op == 0, 0) ) {
	maybe_finalize();
        op = (ptr_t)GC_clear_stack(
		GC_generic_malloc_words_small_inner(lw, GC_gcj_kind));
	if (0 == op) {
	    UNLOCK();
	    return GC_oom_fn(WORDS_TO_BYTES(lw));
	}
    } else {
        *opp = obj_link(op);
        GC_words_allocd += lw;
    }
    *(void **)op = ptr_to_struct_containing_descr;
    UNLOCK();
    return((GC_PTR) op);
}
Example No. 10
/* The same thing, but don't clear objects: */
STATIC ptr_t GC_reclaim_uninit(struct hblk *hbp, hdr *hhdr, size_t sz,
                               ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(sz == hhdr -> hb_sz);
    p = (word *)(hbp->hb_body);
    plim = (word *)((ptr_t)hbp + HBLKSIZE - sz);

    /* go through all words in block */
    while (p <= plim) {
        if( !mark_bit_from_hdr(hhdr, bit_no) ) {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
        }
        p = (word *)((ptr_t)p + sz);
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return(list);
}
Example No. 11
GC_PTR GC_local_malloc_atomic(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return(GC_malloc_atomic(bytes));
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
		        -> ptrfree_freelists + index;
	ptr_t my_entry = *my_fl;
    
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    GC_PTR result = (GC_PTR)my_entry;
	    *my_fl = obj_link(my_entry);
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    *my_fl = my_entry + index + 1;
	    return GC_malloc_atomic(bytes);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
	    /* *my_fl is updated while the collector is excluded;	*/
	    /* the free list is always visible to the collector as 	*/
	    /* such.							*/
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_malloc_atomic(bytes);
	}
    }
}
Example No. 12
/* We hold the allocator lock.						*/
static void return_freelists(ptr_t *fl, ptr_t *gfl)
{
    int i;
    ptr_t q, *qptr;
    size_t nwords;

    for (i = 1; i < NFREELISTS; ++i) {
	nwords = i * (GRANULARITY/sizeof(word));
        qptr = fl + i;	
	q = *qptr;
	if ((word)q >= HBLKSIZE) {
	  if (gfl[nwords] == 0) {
	    gfl[nwords] = q;
	  } else {
	    /* Concatenate: */
	    for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
	    GC_ASSERT(0 == q);
	    *qptr = gfl[nwords];
	    gfl[nwords] = fl[i];
	  }
	}
	/* Clear fl[i], since the thread structure may hang around.	*/
	/* Do it in a way that is likely to trap if we access it.	*/
	fl[i] = (ptr_t)HBLKSIZE;
    }
}
Example No. 13
GC_PTR GC_local_malloc(size_t bytes)
{
    if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
        return(GC_malloc(bytes));
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl;
	ptr_t my_entry;
#	if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
	GC_key_t k = GC_thread_key;
#	endif
	void * tsd;

#	if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
	    if (EXPECT(0 == k, 0)) {
		/* This can happen if we get called when the world is	*/
		/* being initialized.  Whether we can actually complete	*/
		/* the initialization then is unclear.			*/
		GC_init_parallel();
		k = GC_thread_key;
	    }
#	endif
	tsd = GC_getspecific(GC_thread_key);
#	ifdef GC_ASSERTIONS
	  LOCK();
	  GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
	  UNLOCK();
#	endif
	my_fl = ((GC_thread)tsd) -> normal_freelists + index;
	my_entry = *my_fl;
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    ptr_t next = obj_link(my_entry);
	    GC_PTR result = (GC_PTR)my_entry;
	    *my_fl = next;
	    obj_link(my_entry) = 0;
	    PREFETCH_FOR_WRITE(next);
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    *my_fl = my_entry + index + 1;
	    return GC_malloc(bytes);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_malloc(bytes);
	}
    }
}
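
The refill step above is available to clients too: GC_generic_malloc_many (declared in gc_inline.h in recent collector releases) fills a caller-supplied free list under a single lock acquisition. A hedged sketch, assuming GC_I_NORMAL as the public kind constant and treating 32 as an already-rounded request size:

#include "gc.h"
#include "gc/gc_inline.h"

static void *cache;     /* toy single-threaded local free list */

void *alloc_small(void)
{
    void *op = cache;

    if (op == NULL) {
        /* One lock acquisition buys a whole list of objects. */
        GC_generic_malloc_many(32, GC_I_NORMAL, &cache);
        op = cache;
        if (op == NULL) return NULL;    /* out of memory */
    }
    cache = *(void **)op;   /* the link lives in the first word */
    *(void **)op = NULL;    /* clear it before handing the object out */
    return op;
}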
Example No. 14
/* Explicitly deallocate an object p.                           */
GC_API void GC_CALL GC_free(void * p)
{
    struct hblk *h;
    hdr *hhdr;
    size_t sz; /* In bytes */
    size_t ngranules;   /* sz in granules */
    void **flh;
    int knd;
    struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
    /* Required by ANSI.  It's not my fault ...     */
#   ifdef LOG_ALLOCS
    GC_log_printf("GC_free(%p) after GC #%lu\n",
                  p, (unsigned long)GC_gc_no);
#   endif
    h = HBLKPTR(p);
    hhdr = HDR(h);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(MSWIN32))
    /* For Solaris, we have to redirect malloc calls during         */
    /* initialization.  For the others, this seems to happen        */
    /* implicitly.                                                  */
    /* Don't try to deallocate that memory.                         */
    if (0 == hhdr) return;
#   endif
    GC_ASSERT(GC_base(p) == p);
    sz = hhdr -> hb_sz;
    ngranules = BYTES_TO_GRANULES(sz);
    knd = hhdr -> hb_obj_kind;
    ok = &GC_obj_kinds[knd];
    if (EXPECT(ngranules <= MAXOBJGRANULES, TRUE)) {
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        /* It's unnecessary to clear the mark bit.  If the      */
        /* object is reallocated, it doesn't matter.  O.w. the  */
        /* collector will do it, since it's on a free list.     */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, sz-sizeof(word));
        }
        flh = &(ok -> ok_freelist[ngranules]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
        UNLOCK();
    } else {
        size_t nblocks = OBJ_SZ_TO_BLOCKS(sz);
        LOCK();
        GC_bytes_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= sz;
        if (nblocks > 1) {
            GC_large_allocd_bytes -= nblocks * HBLKSIZE;
        }
        GC_freehblk(h);
        UNLOCK();
    }
}
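
Under this collector GC_free is optional: dropping the last reference is enough, and calling GC_free merely short-circuits the reclaim phase, as the small-object path above shows. Minimal client usage:

#include "gc.h"

int main(void)
{
    void *p;

    GC_INIT();
    p = GC_malloc(64);
    GC_free(p);     /* goes straight back onto the appropriate free list */
    return 0;
}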
Example No. 15
static void return_single_freelist(void *fl, void **gfl)
{
    void *q, **qptr;

    if (*gfl == 0) {
      *gfl = fl;
    } else {
      GC_ASSERT(GC_size(fl) == GC_size(*gfl));
      /* Concatenate: */
        qptr = &(obj_link(fl));
        while ((word)(q = *qptr) >= HBLKSIZE)
          qptr = &(obj_link(q));
        GC_ASSERT(0 == q);
        *qptr = *gfl;
        *gfl = fl;
    }
}
Example No. 16
/*
 * Clear all obj_link pointers in the list of free objects *flp.
 * Clear *flp.
 * This must be done before dropping a list of free gcj-style objects,
 * since we may otherwise end up with dangling "descriptor" pointers.
 * It may help for other pointer-containing objects.
 */
STATIC void GC_clear_fl_links(void **flp)
{
    void *next = *flp;

    while (0 != next) {
        *flp = 0;
        flp = &(obj_link(next));
        next = *flp;
    }
}
Example No. 17
GC_API void * GC_CALL GC_finalized_malloc(size_t lb,
                                const struct GC_finalizer_closure *fclos)
{
    ptr_t op;
    word lg;
    DCL_LOCK_STATE;

    lb += sizeof(void *);
    GC_ASSERT(done_init);
    if (SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        LOCK();
        op = GC_finalized_objfreelist[lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            op = GC_generic_malloc(lb, GC_finalized_kind);
            if (NULL == op)
                return NULL;
            /* GC_generic_malloc has extended the size map for us.      */
            lg = GC_size_map[lb];
        } else {
            GC_finalized_objfreelist[lg] = obj_link(op);
            obj_link(op) = 0;
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
            UNLOCK();
        }
        GC_ASSERT(lg > 0);
        ((const void **)op)[GRANULES_TO_WORDS(lg) - 1] = fclos;
    } else {
        size_t op_sz;

        op = GC_generic_malloc(lb, GC_finalized_kind);
        if (NULL == op)
            return NULL;
        op_sz = GC_size(op);
        GC_ASSERT(op_sz >= lb);
        ((const void **)op)[op_sz / sizeof(void *) - 1] = fclos;
    }
    return GC_clear_stack(op);
}
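
Client-side view of this entry point, via the gc_disclaim.h API: the closure must outlive the object, since only its address is stored in the object's last word. A sketch, assuming the API shape of recent bdwgc releases (GC_init_finalized_malloc must run before the first GC_finalized_malloc call):

#include <stdio.h>
#include "gc.h"
#include "gc/gc_disclaim.h"

static void on_reclaim(void *obj, void *cd)
{
    (void)obj;
    (void)cd;
    puts("reclaimed");
}

int main(void)
{
    /* Must outlive every object whose last word points at it. */
    static const struct GC_finalizer_closure fc = { on_reclaim, NULL };

    GC_INIT();
    GC_init_finalized_malloc();
    (void)GC_finalized_malloc(24, &fc);
    GC_gcollect();      /* the object is unreachable; on_reclaim may run */
    return 0;
}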
Example No. 18
  /* Check whether an object from the given block appears on           */
  /* the appropriate free list.                                        */
  STATIC GC_bool GC_on_free_list(struct hblk *h)
  {
    hdr * hhdr = HDR(h);
    size_t sz = BYTES_TO_WORDS(hhdr -> hb_sz);
    ptr_t p;

    if (sz > MAXOBJWORDS) return(FALSE);
    for (p = GC_sobjfreelist[sz]; p != 0; p = obj_link(p)) {
        if (HBLKPTR(p) == h) return(TRUE);
    }
    return(FALSE);
  }
Example No. 19
  GC_API void * GC_CALL GC_finalized_malloc(size_t client_lb,
                                const struct GC_finalizer_closure *fclos)
  {
    size_t lb = client_lb + sizeof(void *);
    size_t lg = ROUNDED_UP_GRANULES(lb);
    GC_tlfs tsd;
    void *result;
    void **tiny_fl, **my_fl, *my_entry;
    void *next;

    if (EXPECT(lg >= GC_TINY_FREELISTS, FALSE))
        return GC_core_finalized_malloc(client_lb, fclos);

    tsd = GC_getspecific(GC_thread_key);
    tiny_fl = tsd->finalized_freelists;
    my_fl = tiny_fl + lg;
    my_entry = *my_fl;
    while (EXPECT((word)my_entry
                  <= DIRECT_GRANULES + GC_TINY_FREELISTS + 1, FALSE)) {
        if ((word)my_entry - 1 < DIRECT_GRANULES) {
            *my_fl = (ptr_t)my_entry + lg + 1;
            return GC_core_finalized_malloc(client_lb, fclos);
        } else {
            GC_generic_malloc_many(GC_RAW_BYTES_FROM_INDEX(lg),
                                   GC_finalized_kind, my_fl);
            my_entry = *my_fl;
            if (my_entry == 0) {
                return (*GC_get_oom_fn())(lb);
            }
        }
    }

    next = obj_link(my_entry);
    result = (void *)my_entry;
    *my_fl = next;
    obj_link(result) = 0;
    ((const void **)result)[GRANULES_TO_WORDS(lg) - 1] = fclos;
    PREFETCH_FOR_WRITE(next);
    return result;
  }
Example No. 20
/* This adds a byte at the end of the object if GC_malloc would.*/
void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#       endif
	opp = &(GC_gcjobjfreelist[lw]);
	LOCK();
	op = *opp;
        if(EXPECT(op == 0, 0)) {
	    maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
	    if (0 == op) {
		UNLOCK();
		return(GC_oom_fn(lb));
	    }
#	    ifdef MERGE_SIZES
		lw = GC_size_map[lb];	/* May have been uninitialized.	*/
#	    endif
        } else {
            *opp = obj_link(op);
            GC_words_allocd += lw;
        }
	*(void **)op = ptr_to_struct_containing_descr;
	GC_ASSERT(((void **)op)[1] == 0);
	UNLOCK();
    } else {
	LOCK();
	maybe_finalize();
	op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
	if (0 == op) {
	    UNLOCK();
	    return(GC_oom_fn(lb));
	}
	*(void **)op = ptr_to_struct_containing_descr;
	UNLOCK();
    }
    return((GC_PTR) op);
}
Example No. 21
/* Currently for debugger use only: */
void GC_print_free_list(int kind, size_t sz_in_granules)
{
    struct obj_kind * ok = &GC_obj_kinds[kind];
    ptr_t flh = ok -> ok_freelist[sz_in_granules];
    struct hblk *lastBlock = 0;
    int n = 0;

    while (flh) {
        struct hblk *block = HBLKPTR(flh);
        if (block != lastBlock) {
            GC_printf("\nIn heap block at %p:\n\t", block);
            lastBlock = block;
        }
        GC_printf("%d: %p;", ++n, flh);
        flh = obj_link(flh);
    }
}
Example No. 22
/*
 * Restore unmarked small objects in h of size sz to the object
 * free list.  Returns the new list.
 * Clears unmarked objects.  Sz is in bytes.
 */
STATIC ptr_t GC_reclaim_clear(struct hblk *hbp, hdr *hhdr, size_t sz,
                              ptr_t list, signed_word *count)
{
    word bit_no = 0;
    word *p, *q, *plim;
    signed_word n_bytes_found = 0;

    GC_ASSERT(hhdr == GC_find_header((ptr_t)hbp));
    GC_ASSERT(sz == hhdr -> hb_sz);
    GC_ASSERT((sz & (BYTES_PER_WORD-1)) == 0);
    p = (word *)(hbp->hb_body);
    plim = (word *)(hbp->hb_body + HBLKSIZE - sz);

    /* go through all words in block */
    while (p <= plim) {
        if( mark_bit_from_hdr(hhdr, bit_no) ) {
            p = (word *)((ptr_t)p + sz);
        } else {
            n_bytes_found += sz;
            /* object is available - put on list */
            obj_link(p) = list;
            list = ((ptr_t)p);
            /* Clear object, advance p to next object in the process */
            q = (word *)((ptr_t)p + sz);
#           ifdef USE_MARK_BYTES
            GC_ASSERT(!(sz & 1)
                      && !((word)p & (2 * sizeof(word) - 1)));
            p[1] = 0;
            p += 2;
            while (p < q) {
                CLEAR_DOUBLE(p);
                p += 2;
            }
#           else
            p++; /* Skip link field */
            while (p < q) {
                *p++ = 0;
            }
#           endif
        }
        bit_no += MARK_BIT_OFFSET(sz);
    }
    *count += n_bytes_found;
    return(list);
}
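
The same sweep pattern, reduced to a stand-alone sketch: walk a block of equal-sized objects, consult a one-byte-per-object mark array, and thread every unmarked object onto a free list after clearing it. All the types here are local stand-ins, and sz is assumed to be a multiple of the word size and at least two words:

#include <stddef.h>
#include <string.h>

#define BLK_BYTES 4096

typedef struct {
    unsigned char mark[BLK_BYTES / (2 * sizeof(void *))]; /* 1 byte/object */
    void *body[BLK_BYTES / sizeof(void *)];   /* word-aligned storage */
} blk;

static void *sweep(blk *b, size_t sz, void *list, size_t *found)
{
    unsigned char *p = (unsigned char *)b->body;
    unsigned char *plim = (unsigned char *)b->body + BLK_BYTES - sz;
    size_t i = 0;

    for (; p <= plim; p += sz, ++i) {
        if (!b->mark[i]) {              /* unmarked: reclaim it */
            memset(p, 0, sz);           /* clear, like GC_reclaim_clear */
            *(void **)p = list;         /* link field in the first word */
            list = p;
            *found += sz;
        }
    }
    return list;
}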
Example No. 23
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_kind(size_t bytes, int knd)
{
    size_t granules;
    void *tsd;
    void *result;

#   if MAXOBJKINDS > THREAD_FREELISTS_KINDS
      if (EXPECT(knd >= THREAD_FREELISTS_KINDS, FALSE)) {
        return GC_malloc_kind_global(bytes, knd);
      }
#   endif
#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
    {
      GC_key_t k = GC_thread_key;

      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_malloc_kind_global(bytes, knd);
      }
      tsd = GC_getspecific(k);
    }
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_malloc_kind_global(bytes, knd);
      }
#   endif
    GC_ASSERT(GC_is_initialized);
    GC_ASSERT(GC_is_thread_tsd_valid(tsd));
    granules = ROUNDED_UP_GRANULES(bytes);
    GC_FAST_MALLOC_GRANS(result, granules,
                         ((GC_tlfs)tsd) -> _freelists[knd], DIRECT_GRANULES,
                         knd, GC_malloc_kind_global(bytes, knd),
                         (void)(knd == PTRFREE ? NULL
                                               : (obj_link(result) = 0)));
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc_kind(%lu, %d) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, knd, result,
                    (unsigned long)GC_gc_no);
#   endif
    return result;
}
Example No. 24
GC_PTR GC_local_gcj_malloc(size_t bytes,
			   void * ptr_to_struct_containing_descr)
{
    GC_ASSERT(GC_gcj_malloc_initialized);
    if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
        return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
    } else {
	int index = INDEX_FROM_BYTES(bytes);
	ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
	                -> gcj_freelists + index;
	ptr_t my_entry = *my_fl;
	if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
	    GC_PTR result = (GC_PTR)my_entry;
	    GC_ASSERT(!GC_incremental);
	    /* We assert that any concurrent marker will stop us.	*/
	    /* Thus it is impossible for a mark procedure to see the 	*/
	    /* allocation of the next object, but to see this object 	*/
	    /* still containing a free list pointer.  Otherwise the 	*/
	    /* marker might find a random "mark descriptor".		*/
	    *(volatile ptr_t *)my_fl = obj_link(my_entry);
	    /* We must update the freelist before we store the pointer.	*/
	    /* Otherwise a GC at this point would see a corrupted	*/
	    /* free list.						*/
	    /* A memory barrier is probably never needed, since the 	*/
	    /* action of stopping this thread will cause prior writes	*/
	    /* to complete.						*/
	    GC_ASSERT(((void * volatile *)result)[1] == 0); 
	    *(void * volatile *)result = ptr_to_struct_containing_descr; 
	    return result;
	} else if ((word)my_entry - 1 < DIRECT_GRANULES) {
	    if (!GC_incremental) *my_fl = my_entry + index + 1;
	    	/* In the incremental case, we always have to take this */
	    	/* path.  Thus we leave the counter alone.		*/
            return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
	} else {
	    GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
	    if (*my_fl == 0) return GC_oom_fn(bytes);
	    return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
	}
    }
}
Example No. 25
void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
{
    ptr_t op;
    ptr_t * opp;
    word lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
	lg = GC_size_map[lb];
	opp = &(GC_gcjobjfreelist[lg]);
	LOCK();
	op = *opp;
        if(EXPECT(op == 0, 0)) {
	    maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
	    if (0 == op) {
		UNLOCK();
		return(GC_oom_fn(lb));
	    }
        } else {
            *opp = obj_link(op);
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        }
	*(void **)op = ptr_to_struct_containing_descr;
	GC_ASSERT(((void **)op)[1] == 0);
	UNLOCK();
    } else {
	LOCK();
	maybe_finalize();
	op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
	if (0 == op) {
	    UNLOCK();
	    return(GC_oom_fn(lb));
	}
	*(void **)op = ptr_to_struct_containing_descr;
	UNLOCK();
    }
    return((void *) op);
}
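
What a client of the gcj interface supplies: every object's first word points at a vtable-like structure whose second word holds the GC mark descriptor. The sketch below is modeled on the fake vtables in the collector's own gctest; the descriptor layout, the length-style descriptor value, and the do-nothing fallback mark procedure are assumptions, and a collector built with gcj support is required:

#include "gc.h"
#include "gc/gc_gcj.h"
#include "gc/gc_mark.h"

struct fake_vtable {
    void *class_ptr;    /* class pointer in real gcj */
    GC_word descr;      /* mark descriptor */
};

/* Fallback mark procedure; with only length descriptors in use it   */
/* should never run, so it pushes nothing onto the mark stack.       */
static struct GC_ms_entry *no_mark_proc(GC_word *addr,
                                        struct GC_ms_entry *msp,
                                        struct GC_ms_entry *msl,
                                        GC_word env)
{
    (void)addr; (void)msl; (void)env;
    return msp;
}

static struct fake_vtable vt = {
    0, (2 * sizeof(void *)) | GC_DS_LENGTH      /* scan both words */
};

int main(void)
{
    void **obj;

    GC_INIT();
    GC_init_gcj_malloc(0, (void *)(GC_word)no_mark_proc);
    obj = (void **)GC_gcj_malloc(2 * sizeof(void *), &vt);
    (void)obj;          /* obj[0] now points at vt, as in the code above */
    return 0;
}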
Example No. 26
void * GC_gcj_malloc_ignore_off_page(size_t lb,
				     void * ptr_to_struct_containing_descr) 
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
	  lw = GC_size_map[lb];
#	else
	  lw = ALIGNED_WORDS(lb);
#       endif
	opp = &(GC_gcjobjfreelist[lw]);
	LOCK();
        if( (op = *opp) == 0 ) {
	    maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
	    if (0 == op) {
		UNLOCK();
		return(GC_oom_fn(lb));	/* don't deref a failed allocation */
	    }
#	    ifdef MERGE_SIZES
		lw = GC_size_map[lb];	/* May have been uninitialized.	*/
#	    endif
        } else {
            *opp = obj_link(op);
            GC_words_allocd += lw;
        }
	*(void **)op = ptr_to_struct_containing_descr;
	UNLOCK();
    } else {
	LOCK();
	maybe_finalize();
        op = (ptr_t)GENERAL_MALLOC_IOP(lb, GC_gcj_kind);
        if (0 != op) {
          *(void **)op = ptr_to_struct_containing_descr;
	}
        UNLOCK();
    }
    return((GC_PTR) op);
}
Example No. 27
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc(size_t bytes)
{
    size_t granules = ROUNDED_UP_GRANULES(bytes);
    void *tsd;
    void *result;
    void **tiny_fl;

#   if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_WIN32_SPECIFIC)
      GC_key_t k = GC_thread_key;
      if (EXPECT(0 == k, FALSE)) {
        /* We haven't yet run GC_init_parallel.  That means     */
        /* we also aren't locking, so this is fairly cheap.     */
        return GC_core_malloc(bytes);
      }
      tsd = GC_getspecific(k);
#   else
      tsd = GC_getspecific(GC_thread_key);
#   endif
#   if !defined(USE_COMPILER_TLS) && !defined(USE_WIN32_COMPILER_TLS)
      if (EXPECT(0 == tsd, FALSE)) {
        return GC_core_malloc(bytes);
      }
#   endif
    GC_ASSERT(GC_is_initialized);

    GC_ASSERT(GC_is_thread_tsd_valid(tsd));

    tiny_fl = ((GC_tlfs)tsd) -> normal_freelists;
    GC_FAST_MALLOC_GRANS(result, granules, tiny_fl, DIRECT_GRANULES,
                         NORMAL, GC_core_malloc(bytes), obj_link(result)=0);
#   ifdef LOG_ALLOCS
      GC_log_printf("GC_malloc(%lu) returned %p, recent GC #%lu\n",
                    (unsigned long)bytes, result, (unsigned long)GC_gc_no);
#   endif
    return result;
}
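
For contrast, the client side of this fast path needs only gc.h; each small request below should be served from the thread-local free lists without taking the allocator lock:

#include <stdio.h>
#include "gc.h"

int main(void)
{
    int i;

    GC_INIT();
    for (i = 0; i < 1000000; ++i) {
        void *p = GC_malloc(24);    /* thread-local fast path */
        (void)p;                    /* dropped; the collector reclaims it */
    }
    printf("heap size: %lu\n", (unsigned long)GC_get_heap_size());
    return 0;
}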
Example No. 28
GC_API GC_ATTR_MALLOC void * GC_CALL GC_malloc_atomic(size_t lb)
{
    void *op;
    size_t lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
        GC_DBG_COLLECT_AT_MALLOC(lb);
        lg = GC_size_map[lb];
        LOCK();
        op = GC_freelists[PTRFREE][lg];
        if (EXPECT(0 == op, FALSE)) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        GC_freelists[PTRFREE][lg] = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}
Example No. 29
GC_API void * GC_CALL GC_malloc_atomic(size_t lb)
{
    void *op;
    void ** opp;
    size_t lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
        lg = GC_size_map[lb];
        opp = &(GC_aobjfreelist[lg]);
        LOCK();
        if (EXPECT((op = *opp) == 0, FALSE)) {
            UNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        *opp = obj_link(op);
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        UNLOCK();
        return((void *) op);
    } else {
        return(GENERAL_MALLOC((word)lb, PTRFREE));
    }
}
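
Usage note on the atomic (pointer-free) kind: the collector never scans the contents of these objects, so they must not hold the only reference to anything collectable, and unlike GC_malloc the returned memory is not guaranteed to be cleared. A minimal client sketch:

#include <string.h>
#include "gc.h"

int main(void)
{
    char *buf;

    GC_INIT();
    buf = (char *)GC_malloc_atomic(4096);  /* e.g. a byte buffer */
    memset(buf, 0, 4096);                  /* clear it ourselves */
    return 0;
}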
Example No. 30
// --------------------------------------------------------------------
// Load game 
// Loads all the relevant data for a level.
// If level != -1, it loads the filename with extension changed to .min
// Otherwise it loads the appropriate level mine.
// returns 0=everything ok, 1=old version, -1=error
int load_game_data(PHYSFS_file *LoadFile)
{
	int i,j;

	short game_top_fileinfo_version;
	int object_offset;
	int gs_num_objects;
	int trig_size;

	//===================== READ FILE INFO ========================

#if 0
	PHYSFS_read(LoadFile, &game_top_fileinfo, sizeof(game_top_fileinfo), 1);
#endif

	// Check signature
	if (PHYSFSX_readShort(LoadFile) != 0x6705)
		return -1;

	// Read and check version number
	game_top_fileinfo_version = PHYSFSX_readShort(LoadFile);
	if (game_top_fileinfo_version < GAME_COMPATIBLE_VERSION )
		return -1;

	// We skip some parts of the former game_top_fileinfo
	PHYSFSX_fseek(LoadFile, 31, SEEK_CUR);

	object_offset = PHYSFSX_readInt(LoadFile);
	gs_num_objects = PHYSFSX_readInt(LoadFile);
	PHYSFSX_fseek(LoadFile, 8, SEEK_CUR);

	Num_walls = PHYSFSX_readInt(LoadFile);
	PHYSFSX_fseek(LoadFile, 20, SEEK_CUR);

	Num_triggers = PHYSFSX_readInt(LoadFile);
	PHYSFSX_fseek(LoadFile, 24, SEEK_CUR);

	trig_size = PHYSFSX_readInt(LoadFile);
	Assert(trig_size == sizeof(ControlCenterTriggers));
	(void)trig_size;
	PHYSFSX_fseek(LoadFile, 4, SEEK_CUR);

	Num_robot_centers = PHYSFSX_readInt(LoadFile);
	PHYSFSX_fseek(LoadFile, 4, SEEK_CUR);

	if (game_top_fileinfo_version >= 31) //load mine filename
		// read newline-terminated string, not sure what version this changed.
		PHYSFSX_fgets(Current_level_name,sizeof(Current_level_name),LoadFile);
	else if (game_top_fileinfo_version >= 14) { //load mine filename
		// read null-terminated string
		char *p=Current_level_name;
		//must read one char at a time, since no PHYSFSX_fgets()
		do *p = PHYSFSX_fgetc(LoadFile); while (*p++!=0);
	}
	else
		Current_level_name[0]=0;

	if (game_top_fileinfo_version >= 19) {	//load pof names
		N_save_pof_names = PHYSFSX_readShort(LoadFile);
		if (N_save_pof_names != 0x614d && N_save_pof_names != 0x5547) { // "Ma"de w/DMB beta/"GU"ILE
			Assert(N_save_pof_names < MAX_POLYGON_MODELS);
			PHYSFS_read(LoadFile,Save_pof_names,N_save_pof_names,FILENAME_LEN);
		}
	}

	//===================== READ PLAYER INFO ==========================


	//===================== READ OBJECT INFO ==========================

	Gamesave_num_org_robots = 0;
	Gamesave_num_players = 0;

	if (object_offset > -1) {
		if (PHYSFSX_fseek( LoadFile, object_offset, SEEK_SET ))
			Error( "Error seeking to object_offset in gamesave.c" );

		for (i = 0; i < gs_num_objects; i++) {

			read_object(&Objects[i], LoadFile, game_top_fileinfo_version);

			Objects[i].signature = obj_get_signature();
			verify_object( &Objects[i] );
		}

	}

	//===================== READ WALL INFO ============================

	for (i = 0; i < Num_walls; i++) {
		if (game_top_fileinfo_version >= 20)
			wall_read(&Walls[i], LoadFile); // v20 walls and up.
		else if (game_top_fileinfo_version >= 17) {
			v19_wall w;
			v19_wall_read(&w, LoadFile);
			Walls[i].segnum	        = w.segnum;
			Walls[i].sidenum	= w.sidenum;
			Walls[i].linked_wall	= w.linked_wall;
			Walls[i].type		= w.type;
			Walls[i].flags		= w.flags;
			Walls[i].hps		= w.hps;
			Walls[i].trigger	= w.trigger;
			Walls[i].clip_num	= convert_wclip(w.clip_num);
			Walls[i].keys		= w.keys;
			Walls[i].state		= WALL_DOOR_CLOSED;
		} else {
			v16_wall w;
			v16_wall_read(&w, LoadFile);
			Walls[i].segnum = Walls[i].sidenum = Walls[i].linked_wall = -1;
			Walls[i].type		= w.type;
			Walls[i].flags		= w.flags;
			Walls[i].hps		= w.hps;
			Walls[i].trigger	= w.trigger;
			Walls[i].clip_num	= convert_wclip(w.clip_num);
			Walls[i].keys		= w.keys;
		}
	}

#if 0
	//===================== READ DOOR INFO ============================

	if (game_fileinfo.doors_offset > -1)
	{
		if (!PHYSFSX_fseek( LoadFile, game_fileinfo.doors_offset,SEEK_SET ))	{

			for (i=0;i<game_fileinfo.doors_howmany;i++) {

				if (game_top_fileinfo_version >= 20)
					active_door_read(&ActiveDoors[i], LoadFile); // version 20 and up
				else {
					v19_door d;
					int p;

					v19_door_read(&d, LoadFile);

					ActiveDoors[i].n_parts = d.n_parts;

					for (p=0;p<d.n_parts;p++) {
						int cseg,cside;

						cseg = Segments[d.seg[p]].children[d.side[p]];
						cside = find_connect_side(&Segments[d.seg[p]],&Segments[cseg]);

						ActiveDoors[i].front_wallnum[p] = Segments[d.seg[p]].sides[d.side[p]].wall_num;
						ActiveDoors[i].back_wallnum[p] = Segments[cseg].sides[cside].wall_num;
					}
				}

			}
		}
	}
#endif // 0

	//==================== READ TRIGGER INFO ==========================

	for (i = 0; i < Num_triggers; i++)
	{
		if (game_top_fileinfo_version <= 25)
			trigger_read(&Triggers[i], LoadFile);
		else {
			int type;
			switch ((type = PHYSFSX_readByte(LoadFile)))
			{
				case 0: // door
					Triggers[i].type = 0;
					Triggers[i].flags = TRIGGER_CONTROL_DOORS;
					break;
				case 2: // matcen
					Triggers[i].type = 0;
					Triggers[i].flags = TRIGGER_MATCEN;
					break;
				case 3: // exit
					Triggers[i].type = 0;
					Triggers[i].flags = TRIGGER_EXIT;
					break;
				case 4: // secret exit
					Triggers[i].type = 0;
					Triggers[i].flags = TRIGGER_SECRET_EXIT;
					break;
				case 5: // illusion off
					Triggers[i].type = 0;
					Triggers[i].flags = TRIGGER_ILLUSION_OFF;
					break;
				case 6: // illusion on
					Triggers[i].type = 0;
					Triggers[i].flags = TRIGGER_ILLUSION_ON;
					break;
				default:
					con_printf(CON_URGENT,"Warning: unsupported trigger type %d (%d)\n", type, i);
			}
			if (PHYSFSX_readByte(LoadFile) & 2)	// one shot
				Triggers[i].flags |= TRIGGER_ONE_SHOT;
			Triggers[i].num_links = PHYSFSX_readShort(LoadFile);
			Triggers[i].value = PHYSFSX_readInt(LoadFile);
			Triggers[i].time = PHYSFSX_readInt(LoadFile);
			for (j=0; j<MAX_WALLS_PER_LINK; j++ )	
				Triggers[i].seg[j] = PHYSFSX_readShort(LoadFile);
			for (j=0; j<MAX_WALLS_PER_LINK; j++ )
				Triggers[i].side[j] = PHYSFSX_readShort(LoadFile);
		}
	}

	//================ READ CONTROL CENTER TRIGGER INFO ===============

	control_center_triggers_read_n(&ControlCenterTriggers, 1, LoadFile);

	//================ READ MATERIALOGRIFIZATIONATORS INFO ===============

	for (i = 0; i < Num_robot_centers; i++) {
		matcen_info_read(&RobotCenters[i], LoadFile, game_top_fileinfo_version);
		
		//	Set links in RobotCenters to Station array
		for (j = 0; j <= Highest_segment_index; j++)
			if (Segments[j].special == SEGMENT_IS_ROBOTMAKER)
				if (Segments[j].matcen_num == i)
					RobotCenters[i].fuelcen_num = Segments[j].value;
	}


	//========================= UPDATE VARIABLES ======================

	reset_objects(gs_num_objects);

	for (i=0; i<MAX_OBJECTS; i++) {
		Objects[i].next = Objects[i].prev = -1;
		if (Objects[i].type != OBJ_NONE) {
			int objsegnum = Objects[i].segnum;

			if (objsegnum > Highest_segment_index)		//bogus object
				Objects[i].type = OBJ_NONE;
			else {
				Objects[i].segnum = -1;			//avoid Assert()
				obj_link(i,objsegnum);
			}
		}
	}

	clear_transient_objects(1);		//1 means clear proximity bombs

	// Make sure non-transparent doors are set correctly.
	for (i=0; i< Num_segments; i++)
		for (j=0;j<MAX_SIDES_PER_SEGMENT;j++) {
			side	*sidep = &Segments[i].sides[j];
			if ((sidep->wall_num != -1) && (Walls[sidep->wall_num].clip_num != -1)) {
				if (WallAnims[Walls[sidep->wall_num].clip_num].flags & WCF_TMAP1) {
					sidep->tmap_num = WallAnims[Walls[sidep->wall_num].clip_num].frames[0];
					sidep->tmap_num2 = 0;
				}
			}
		}


	reset_walls();

#if 0
	Num_open_doors = game_fileinfo.doors_howmany;
#endif // 0
	Num_open_doors = 0;

	//go through all walls, killing references to invalid triggers
	for (i=0;i<Num_walls;i++)
		if (Walls[i].trigger >= Num_triggers) {
			Walls[i].trigger = -1;	//kill trigger
		}

	//go through all triggers, killing unused ones
	for (i=0;i<Num_triggers;) {
		int w;

		//	Find which wall this trigger is connected to.
		for (w=0; w<Num_walls; w++)
			if (Walls[w].trigger == i)
				break;

	#ifdef EDITOR
		if (w == Num_walls) {
			remove_trigger_num(i);
		}
		else
	#endif
			i++;
	}

	//	MK, 10/17/95: Make walls point back at the triggers that control them.
	//	Go through all triggers, stuffing controlling_trigger field in Walls.
	{
		int t;

		for (t=0; t<Num_triggers; t++) {
			int	l;
			for (l=0; l<Triggers[t].num_links; l++) {
				int	seg_num;

				seg_num = Triggers[t].seg[l];

				//check to see that if a trigger requires a wall that it has one,
				//and if it requires a matcen that it has one

				if (Triggers[t].type == TRIGGER_MATCEN) {
					if (Segments[seg_num].special != SEGMENT_IS_ROBOTMAKER)
						Int3();		//matcen trigger doesn't point to matcen
				}
			}
		}
	}

	//fix old wall structs
	if (game_top_fileinfo_version < 17) {
		int segnum,sidenum,wallnum;

		for (segnum=0; segnum<=Highest_segment_index; segnum++)
			for (sidenum=0;sidenum<6;sidenum++)
				if ((wallnum=Segments[segnum].sides[sidenum].wall_num) != -1) {
					Walls[wallnum].segnum = segnum;
					Walls[wallnum].sidenum = sidenum;
				}
	}

	#ifndef NDEBUG
	{
		int	sidenum;
		for (sidenum=0; sidenum<6; sidenum++) {
			int	wallnum = Segments[Highest_segment_index].sides[sidenum].wall_num;
			if (wallnum != -1)
				if ((Walls[wallnum].segnum != Highest_segment_index) || (Walls[wallnum].sidenum != sidenum))
					Int3();	//	Error.  Bogus walls in this segment.
								// Consult Yuan or Mike.
		}
	}
	#endif

	//create_local_segment_data();

	fix_object_segs();

	#ifndef NDEBUG
	dump_mine_info();
	#endif

	if (game_top_fileinfo_version < GAME_VERSION)
		return 1;		//means old version
	else
		return 0;
}
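
The PHYSFSX_read* helpers used throughout assemble little-endian values byte by byte, so the level format parses identically on any host. A hypothetical stand-alone equivalent of the 16-bit reader (not the project's exact helper):

#include <physfs.h>

static short my_read_short_le(PHYSFS_file *f)
{
    unsigned char b[2];

    if (PHYSFS_read(f, b, 1, 2) != 2)   /* two 1-byte objects */
        return -1;                      /* caller treats as error */
    return (short)(b[0] | (b[1] << 8));
}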