Example #1
/* Unlike the pthreads version, the id field is set by the caller.	*/
GC_thread GC_new_thread(DWORD id)
{
    word hv = ((word)id) % THREAD_TABLE_SZ;
    GC_thread result;
    /* It may not be safe to allocate when we register the first thread. */
    static struct GC_Thread_Rep first_thread;
    static GC_bool first_thread_used = FALSE;
    
    GC_ASSERT(I_HOLD_LOCK());
    if (!first_thread_used) {
    	result = &first_thread;
    	first_thread_used = TRUE;
    } else {
        GC_ASSERT(!GC_win32_dll_threads);
        result = (struct GC_Thread_Rep *)
        	 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
#       ifdef GC_PTHREADS
	  /* GC_INTERNAL_MALLOC may return NULL; guard the dereference. */
	  GC_ASSERT(NULL == result || result -> flags == 0);
#       endif
    }
    if (result == 0) return(0);
    /* result -> id = id; Done by caller.	*/
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
#   ifdef GC_PTHREADS
      GC_ASSERT(result -> flags == 0 /* && result -> thread_blocked == 0 */);
#   endif
    return(result);
}
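The function above is the Win32 flavor of thread registration: a fixed-size array of singly linked chains indexed by id % THREAD_TABLE_SZ, with new entries pushed on the head of their chain (compare GC_delete_thread in Example #5, which unlinks by walking with a prev pointer). A minimal self-contained sketch of that insert/unlink pattern; every name below is illustrative, not collector API:

#include <stdio.h>
#include <stdlib.h>

#define TABLE_SZ 16          /* illustrative; the collector uses THREAD_TABLE_SZ */

struct rep {
    unsigned id;
    struct rep *next;
};

static struct rep *table[TABLE_SZ];

/* Insert at the head of the chain for this id, as GC_new_thread does. */
static struct rep *new_rep(unsigned id)
{
    unsigned hv = id % TABLE_SZ;
    struct rep *r = malloc(sizeof *r);

    if (r == NULL) return NULL;
    r->id = id;
    r->next = table[hv];
    table[hv] = r;
    return r;
}

/* Unlink and free, tracking the predecessor as GC_delete_thread does. */
static void delete_rep(unsigned id)
{
    unsigned hv = id % TABLE_SZ;
    struct rep *p = table[hv], *prev = NULL;

    while (p != NULL && p->id != id) {
        prev = p;
        p = p->next;
    }
    if (p == NULL) return;            /* not registered */
    if (prev == NULL) table[hv] = p->next;
    else prev->next = p->next;
    free(p);
}

int main(void)
{
    new_rep(42);
    new_rep(42 + TABLE_SZ);           /* collides: chained in front */
    delete_rep(42);
    printf("head of chain: %u\n", table[42 % TABLE_SZ]->id);
    return 0;
}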
Example #2
void GC_start_world(void)
{
  DWORD thread_id = GetCurrentThreadId();
  int i;
  LONG my_max = GC_get_max_thread_index();

  GC_ASSERT(I_HOLD_LOCK());
  if (GC_win32_dll_threads) {
    for (i = 0; i <= my_max; i++) {
      GC_thread t = (GC_thread)(dll_thread_table + i);
      if (t -> stack_base != 0 && t -> suspended
	  && t -> id != thread_id) {
        if (ResumeThread(t -> handle) == (DWORD)-1)
	  ABORT("ResumeThread failed");
        t -> suspended = FALSE;
      }
    }
  } else {
    GC_thread t;
    int i;

    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (t = GC_threads[i]; t != 0; t = t -> next) {
        if (t -> stack_base != 0 && t -> suspended
	    && t -> id != thread_id) {
          if (ResumeThread(t -> handle) == (DWORD)-1)
	    ABORT("ResumeThread failed");
          t -> suspended = FALSE;
        }
      }
    }
  }
  GC_please_stop = FALSE;
}
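ResumeThread returns the thread's previous suspend count and (DWORD)-1 on failure, which is why the loop above compares against (DWORD)-1 rather than zero. A minimal Win32 sketch of the suspend/resume pairing, using only the documented kernel32 calls; the workload is a placeholder:

#include <windows.h>
#include <stdio.h>

static DWORD WINAPI worker(LPVOID arg)
{
    (void)arg;
    for (;;) Sleep(10);                 /* placeholder workload */
}

int main(void)
{
    HANDLE h = CreateThread(NULL, 0, worker, NULL, 0, NULL);
    DWORD prev;

    if (h == NULL) return 1;
    if (SuspendThread(h) == (DWORD)-1) return 1;  /* stop-the-world step */
    /* ... a collector would examine the thread's stack here ... */
    prev = ResumeThread(h);
    if (prev == (DWORD)-1) return 1;              /* the failure GC aborts on */
    printf("suspend count before resume: %lu\n", (unsigned long)prev);
    TerminateThread(h, 0);   /* acceptable for a demo; avoid in real code */
    CloseHandle(h);
    return 0;
}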
Example #3
/* This call must be made from the new thread.  */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i, j;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
        GC_ASSERT((word)&GC_thread_key % sizeof(word) == 0);
        if (0 != GC_key_create(&GC_thread_key, reset_thread_key)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (j = 0; j < TINY_FREELISTS; ++j) {
        for (i = 0; i < THREAD_FREELISTS_KINDS; ++i) {
            p -> _freelists[i][j] = (void *)(word)1;
        }
#       ifdef GC_GCJ_SUPPORT
            p -> gcj_freelists[j] = (void *)(word)1;
#       endif
#       ifdef ENABLE_DISCLAIM
            p -> finalized_freelists[j] = (void *)(word)1;
#       endif
    }
    /* The size 0 free lists are handled like the regular free lists,   */
    /* to ensure that the explicit deallocation works.  However,        */
    /* allocation of a size 0 "gcj" object is always an error.          */
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = ERROR_FL;
#   endif
}
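The (void *)(word)1 stores a one-word sentinel in each thread-local free list: 0 would read as "not yet initialized" and a real address as "has entries", so 1 marks "initialized but empty; take the slow path". An illustrative dispatch on that sentinel; the helper names here are assumptions, not collector API:

#include <stdint.h>
#include <stdlib.h>

#define EMPTY_SENTINEL ((void *)(uintptr_t)1)

/* Hypothetical slow path standing in for the collector's global allocator. */
static void *global_alloc(size_t sz) { return malloc(sz); }

static void *tl_alloc(void **freelist_slot, size_t sz)
{
    void *head = *freelist_slot;

    if ((uintptr_t)head <= 1) {
        /* 0: not set up; 1: set up but empty.  Either way, go slow. */
        return global_alloc(sz);
    }
    /* Otherwise pop the first entry; by convention the first word of a
       free object links to the next one. */
    *freelist_slot = *(void **)head;
    return head;
}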
Example #4
void GC_push_thread_structures(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  if (GC_win32_dll_threads) {
    /* Unlike the other threads implementations, the thread table here	*/
    /* contains no pointers to the collectable heap.  Thus we have	*/
    /* no private structures we need to preserve.			*/
#   ifdef GC_PTHREADS 
    { int i; /* pthreads may keep a pointer in the thread exit value */
      LONG my_max = GC_get_max_thread_index();

      for (i = 0; i <= my_max; i++)
        if (dll_thread_table[i].in_use)
	  GC_push_all((ptr_t)&(dll_thread_table[i].status),
                      (ptr_t)(&(dll_thread_table[i].status)+1));
    }
#   endif
  } else {
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
  }
# if defined(THREAD_LOCAL_ALLOC)
    GC_push_all((ptr_t)(&GC_thread_key),
      (ptr_t)(&GC_thread_key)+sizeof(GC_thread_key));
    /* Just in case we ever use our own TLS implementation.	*/
# endif
}
Example #5
/* thread being deleted.					*/
void GC_delete_thread(DWORD id)
{
  if (GC_win32_dll_threads) {
    GC_thread t = GC_lookup_thread_inner(id);

    if (0 == t) {
      WARN("Removing nonexistent thread %ld\n", (GC_word)id);
    } else {
      GC_delete_gc_thread(t);
    }
  } else {
    word hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;
    
    GC_ASSERT(I_HOLD_LOCK());
    while (p -> id != id) {
        prev = p;
        p = p -> next;
    }
    CloseHandle(p->handle);
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
  }
}
Example #6
GC_bool GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
{
    int i;
    GC_thread p;
    
    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
	if (marker_sp[i] > lo && marker_sp[i] < hi) return TRUE;
#       ifdef IA64
	  if (marker_bsp[i] > lo && marker_bsp[i] < hi) return TRUE;
#	endif
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
	if (0 != p -> stack_end) {
#	  ifdef STACK_GROWS_UP
            if (p -> stack_end >= lo && p -> stack_end < hi) return TRUE;
#	  else /* STACK_GROWS_DOWN */
            if (p -> stack_end > lo && p -> stack_end <= hi) return TRUE;
#	  endif
	}
      }
    }
    return FALSE;
}
Example #7
GC_INNER void GC_traverse_back_graph(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_max_height = 0;
  GC_apply_to_each_object(update_max_height);
  if (0 != GC_deepest_obj)
    GC_set_mark_bit(GC_deepest_obj);  /* Keep it until we can print it. */
}
Example #8
void GC_push_thread_structures(void)
{
    GC_ASSERT(I_HOLD_LOCK());
    GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
#   if defined(THREAD_LOCAL_ALLOC)
      GC_push_all((ptr_t)(&GC_thread_key),
	  (ptr_t)(&GC_thread_key)+sizeof(GC_thread_key));
#   endif
}
Example #9
GC_INNER void GC_register_displacement_inner(size_t offset)
{
    GC_ASSERT(I_HOLD_LOCK());
    if (offset >= VALID_OFFSET_SZ) {
        ABORT("Bad argument to GC_register_displacement");
    }
    if (!GC_valid_offsets[offset]) {
      GC_valid_offsets[offset] = TRUE;
      GC_modws_valid_offsets[offset % sizeof(word)] = TRUE;
    }
}
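The two arrays give a two-level test for interior pointers: GC_modws_valid_offsets is a word-granular prefilter on offset mod sizeof(word), consulted before the exact GC_valid_offsets entry. A toy version of the same scheme, with illustrative names and bounds:

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

#define VALID_OFFSET_SZ 64                 /* illustrative bound */

static bool valid_offsets[VALID_OFFSET_SZ];
static bool modws_valid_offsets[sizeof(uintptr_t)];

static void register_displacement(size_t offset)
{
    if (offset >= VALID_OFFSET_SZ) return;  /* the collector aborts here */
    valid_offsets[offset] = true;
    modws_valid_offsets[offset % sizeof(uintptr_t)] = true;
}

/* Cheap prefilter first, exact check second. */
static bool offset_ok(size_t displ)
{
    return modws_valid_offsets[displ % sizeof(uintptr_t)]
           && displ < VALID_OFFSET_SZ && valid_offsets[displ];
}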
Example #10
/* This call must be made from the new thread.  */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i, j;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
        GC_ASSERT((word)&GC_thread_key % sizeof(word) == 0);
        if (0 != GC_key_create(&GC_thread_key, reset_thread_key)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 0; i < MAXOBJKINDS; ++i) {
        for (j = 1; j < TINY_FREELISTS; ++j) {
            p->freelists[i][j] = (void *)(word)1;
        }
        /* Set up the size 0 free lists.    */
        /* We now handle most of them like regular free lists, to ensure    */
        /* that explicit deallocation works.  However, allocation of a      */
        /* size 0 "gcj" object is always an error.                          */
#       ifdef GC_GCJ_SUPPORT
            if (i == GC_gcj_kind) {
                p->freelists[i][0] = ERROR_FL;
            } else
#       endif
        /* else */ {
            p->freelists[i][0] = (void *)(word)1;
        }
    }
#   ifdef ENABLE_DISCLAIM
        /* The loop below already covers index 0. */
        for (i = 0; i < TINY_FREELISTS; ++i) {
            p -> finalized_freelists[i] = (void *)(word)1;
        }
#   endif
}

/* We hold the allocator lock.  */
GC_INNER void GC_destroy_thread_local(GC_tlfs p)
{
    int i;
    /* We currently only do this from the thread itself or from */
    /* the fork handler for a child process.                    */
    for (i = 0; i < MAXOBJKINDS; ++i) {
        return_freelists(p->freelists[i], GC_freelist[i]);
    }
#   ifdef ENABLE_DISCLAIM
        return_freelists(p -> finalized_freelists,
                         (void **)GC_finalized_objfreelist);
#   endif
}
Пример #11
0
/* that pointers past the first page are not relevant.                  */
GC_INNER void * GC_generic_malloc_inner_ignore_off_page(size_t lb, int k)
{
    word lb_adjusted;
    void * op;

    GC_ASSERT(I_HOLD_LOCK());
    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner(lb, k));
    lb_adjusted = ADD_SLOP(lb);
    op = GC_alloc_large_and_clear(lb_adjusted, k, IGNORE_OFF_PAGE);
    GC_bytes_allocd += lb_adjusted;
    return op;
}
Пример #12
GC_INNER void GC_start_debugging_inner(void)
{
  GC_ASSERT(I_HOLD_LOCK());
# ifndef SHORT_DBG_HDRS
    GC_check_heap = GC_check_heap_proc;
    GC_print_all_smashed = GC_print_all_smashed_proc;
# else
    GC_check_heap = GC_do_nothing;
    GC_print_all_smashed = GC_do_nothing;
# endif
  GC_print_heap_obj = GC_debug_print_heap_obj_proc;
  GC_debugging_started = TRUE;
  GC_register_displacement_inner((word)sizeof(oh));
}
Пример #13
/* EXTRA_BYTES were already added to lb.                                */
STATIC ptr_t GC_alloc_large_and_clear(size_t lb, int k, unsigned flags)
{
    ptr_t result;
    word n_blocks = OBJ_SZ_TO_BLOCKS(lb);

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_alloc_large(lb, k, flags);
    if (result != NULL
            && (GC_debugging_started || GC_obj_kinds[k].ok_init)) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}
Пример #14
/* Called by GC_init() - we hold the allocation lock.	*/
void GC_thr_init(void) {
    struct GC_stack_base sb;
    int sb_result;

    GC_ASSERT(I_HOLD_LOCK());
    if (GC_thr_initialized) return;
    GC_main_thread = GetCurrentThreadId();
    GC_thr_initialized = TRUE;

    /* Add the initial thread, so we can stop it.	*/
    sb_result = GC_get_stack_base(&sb);
    GC_ASSERT(sb_result == GC_SUCCESS);
    GC_register_my_thread(&sb);
}
Пример #15
/* require special handling on allocation.      */
GC_INNER void * GC_generic_malloc_inner(size_t lb, int k)
{
    void *op;

    GC_ASSERT(I_HOLD_LOCK());
    if (SMALL_OBJ(lb)) {
        struct obj_kind * kind = GC_obj_kinds + k;
        size_t lg = GC_size_map[lb];
        void ** opp = &(kind -> ok_freelist[lg]);

        op = *opp;
        if (EXPECT(0 == op, FALSE)) {
            if (lg == 0) {
                if (!EXPECT(GC_is_initialized, TRUE)) {
                    DCL_LOCK_STATE;
                    UNLOCK(); /* just to unset GC_lock_holder */
                    GC_init();
                    LOCK();
                    lg = GC_size_map[lb];
                }
                if (0 == lg) {
                    GC_extend_size_map(lb);
                    lg = GC_size_map[lb];
                    GC_ASSERT(lg != 0);
                }
                /* Retry */
                opp = &(kind -> ok_freelist[lg]);
                op = *opp;
            }
            if (0 == op) {
                if (0 == kind -> ok_reclaim_list &&
                        !GC_alloc_reclaim_list(kind))
                    return NULL;
                op = GC_allocobj(lg, k);
                if (0 == op)
                    return NULL;
            }
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_bytes_allocd += GRANULES_TO_BYTES(lg);
    } else {
        op = (ptr_t)GC_alloc_large_and_clear(ADD_SLOP(lb), k, 0);
        GC_bytes_allocd += lb;
    }

    return op;
}
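The fast path above is a plain singly linked free-list pop: read the head, advance the list through the link stored in the object's first word, and scrub that link so the collector does not later trace it. The pop in isolation, with an OBJ_LINK macro standing in for the collector's obj_link():

#include <stddef.h>

/* A free object stores the link to the next free object in its first
   word, which is what the collector's obj_link() macro expresses. */
#define OBJ_LINK(p) (*(void **)(p))

/* Pop the head of a size-class free list; returns NULL if it is empty.
   Minimal sketch of the fast path in GC_generic_malloc_inner. */
static void *freelist_pop(void **opp)
{
    void *op = *opp;

    if (op == NULL) return NULL;   /* slow path: refill via GC_allocobj */
    *opp = OBJ_LINK(op);           /* unlink */
    OBJ_LINK(op) = NULL;           /* scrub the link so it is not scanned */
    return op;
}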
Пример #16
  /* case, we need to make sure that all objects have debug headers.    */
  GC_INNER void * GC_debug_generic_malloc_inner(size_t lb, int k)
  {
    void * result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_generic_malloc_inner(SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
    if (NULL == result) {
        GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
                       (unsigned long) lb);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging_inner();
    }
    ADD_CALL_CHAIN(result, GC_RETURN_ADDR);
    return (GC_store_debug_info_inner(result, (word)lb, "INTERNAL", 0));
  }
Пример #17
/* EXTRA_BYTES were already added to lb.        */
GC_INNER ptr_t GC_alloc_large(size_t lb, int k, unsigned flags)
{
    struct hblk * h;
    word n_blocks;
    ptr_t result;
    GC_bool retry = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    lb = ROUNDUP_GRANULE_SIZE(lb);
    n_blocks = OBJ_SZ_TO_BLOCKS(lb);
    if (!EXPECT(GC_is_initialized, TRUE)) {
        DCL_LOCK_STATE;
        UNLOCK(); /* just to unset GC_lock_holder */
        GC_init();
        LOCK();
    }
    /* Do our share of marking work */
    if (GC_incremental && !GC_dont_gc)
        GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lb, k, flags);
#   ifdef USE_MUNMAP
    if (0 == h) {
        GC_merge_unmapped();
        h = GC_allochblk(lb, k, flags);
    }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, flags != 0, retry)) {
        h = GC_allochblk(lb, k, flags);
        retry = TRUE;
    }
    if (h == 0) {
        result = 0;
    } else {
        size_t total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        /* FIXME: Do we need some way to reset GC_max_large_allocd_bytes? */
        result = h -> hb_body;
    }
    return result;
}
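The allocation loop's shape is try, reclaim-or-grow, try again, with the retry flag telling GC_collect_or_expand that an expansion already happened. A condensed, compilable sketch of that control flow; both callees are hypothetical stand-ins, not collector functions:

#include <stdbool.h>
#include <stddef.h>
#include <stdlib.h>

/* Stand-ins for GC_allochblk and GC_collect_or_expand (hypothetical). */
static void *try_alloc(size_t n_blocks) { return malloc(n_blocks * 4096); }
static bool reclaim_or_grow(size_t n_blocks, bool retry)
{ (void)n_blocks; return !retry; }      /* allow exactly one more attempt */

static void *alloc_with_retry(size_t n_blocks)
{
    bool retry = false;
    void *h = try_alloc(n_blocks);

    while (h == NULL && reclaim_or_grow(n_blocks, retry)) {
        h = try_alloc(n_blocks);
        retry = true;   /* later failures make the callee act more drastically */
    }
    return h;           /* NULL once reclaim_or_grow gives up */
}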
Пример #18
void GC_stop_world(void)
{
  DWORD thread_id = GetCurrentThreadId();
  int i;

  if (!GC_thr_initialized) ABORT("GC_stop_world() called before GC_thr_init()");
  GC_ASSERT(I_HOLD_LOCK());

  GC_please_stop = TRUE;
# ifndef CYGWIN32
    EnterCriticalSection(&GC_write_cs);
# endif
  if (GC_win32_dll_threads) {
    /* Any threads being created during this loop will end up setting   */
    /* GC_attached_thread when they start.  This will force marking to  */
    /* restart.								*/
    /* This is not ideal, but hopefully correct.			*/
    GC_attached_thread = FALSE;
    for (i = 0; i <= GC_get_max_thread_index(); i++) {
      GC_vthread t = dll_thread_table + i;
      if (t -> stack_base != 0
	  && t -> id != thread_id) {
	  GC_suspend((GC_thread)t);
      }
    }
  } else {
      GC_thread t;
      int i;

      for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (t = GC_threads[i]; t != 0; t = t -> next) {
	  if (t -> stack_base != 0
	  && !KNOWN_FINISHED(t)
	  && t -> id != thread_id) {
	    GC_suspend(t);
	  }
	}
      }
  }
# ifndef CYGWIN32
    LeaveCriticalSection(&GC_write_cs);
# endif    
}
Пример #19
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    GC_ASSERT(I_HOLD_LOCK());
    if (GC_incremental && GC_collection_in_progress()) {
	word old_gc_no = GC_gc_no;

	/* Make sure that no part of our stack is still on the mark stack, */
	/* since it's about to be unmapped.				   */
	while (GC_incremental && GC_collection_in_progress()
	       && (wait_for_all || old_gc_no == GC_gc_no)) {
	    ENTER_GC();
	    GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
	    GC_in_thread_creation = FALSE;
	    EXIT_GC();
	    UNLOCK();
	    sched_yield();
	    LOCK();
	}
    }
}
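The wait loop drops the allocator lock around sched_yield() so other threads (and the collector) can make progress, then reacquires it and re-tests the condition. The same pattern reduced to a plain pthread mutex; in the collector, LOCK/UNLOCK wrap its own lock, not a bare mutex:

#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool work_in_progress;           /* protected by lock */

/* Called with lock held; returns with lock held, once the flag clears. */
static void wait_for_completion(void)
{
    while (work_in_progress) {
        /* (the collector does one increment of collection work here) */
        pthread_mutex_unlock(&lock);
        sched_yield();                  /* let the worker run */
        pthread_mutex_lock(&lock);
    }
}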
Пример #20
/* (The code intentionally traps if it wasn't.)			*/
void GC_delete_thread(pthread_t id)
{
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;
    
    GC_ASSERT(I_HOLD_LOCK());
    while (!THREAD_EQUAL(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
#   ifdef GC_DARWIN_THREADS
	mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#   endif
    GC_INTERNAL_FREE(p);
}
Пример #21
void GC_print_back_graph_stats(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_printf("Maximum backwards height of reachable objects at GC %lu is %lu\n",
            (unsigned long) GC_gc_no, (unsigned long)GC_max_height);
  if (GC_max_height > GC_max_max_height) {
    ptr_t obj = GC_deepest_obj;

    GC_max_max_height = GC_max_height;
    UNLOCK();
    GC_err_printf(
            "The following unreachable object is last in a longest chain "
            "of unreachable objects:\n");
    GC_print_heap_obj(obj);
    LOCK();
  }
  GC_COND_LOG_PRINTF("Needed max total of %d back-edge structs\n",
                     GC_n_back_edge_structs);
  GC_apply_to_each_object(reset_back_edge);
  GC_deepest_obj = 0;
}
Пример #22
/* immediately preceding memory stack.				*/
ptr_t GC_greatest_stack_base_below(ptr_t bound)
{
    int i;
    GC_thread p;
    ptr_t result = 0;
    
    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
	if (marker_sp[i] > result && marker_sp[i] < bound)
	  result = marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
	if (p -> stack_end > result && p -> stack_end < bound) {
	  result = p -> stack_end;
	}
      }
    }
    return result;
}
Пример #23
/* Caller holds allocation lock.					*/
GC_thread GC_new_thread(pthread_t id)
{
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    GC_thread result;
    static GC_bool first_thread_used = FALSE;
    
    GC_ASSERT(I_HOLD_LOCK());
    if (!first_thread_used) {
    	result = &first_thread;
    	first_thread_used = TRUE;
    } else {
        result = (struct GC_Thread_Rep *)
        	 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
	/* GC_INTERNAL_MALLOC may return NULL; guard the dereference. */
	GC_ASSERT(NULL == result || result -> flags == 0);
    }
    if (result == 0) return(0);
    result -> id = id;
    result -> next = GC_threads[hv];
    GC_threads[hv] = result;
    GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
    return(result);
}
Пример #24
/* update both *table and *log_size_ptr.  Lock is held.                 */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr)
{
    register word i;
    register struct hash_chain_entry *p;
    signed_word log_old_size = *log_size_ptr;
    signed_word log_new_size = log_old_size + 1;
    word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    new_table = (struct hash_chain_entry **)
                    GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        (size_t)new_size * sizeof(struct hash_chain_entry *),
                        NORMAL);
    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}
Пример #25
/* thread being deleted.					*/
void GC_delete_gc_thread(GC_vthread gc_id)
{
  CloseHandle(gc_id->handle);
  if (GC_win32_dll_threads) {
    /* This is intended to be lock-free.				*/
    /* It is either called synchronously from the thread being deleted,	*/
    /* or by the joining thread.					*/
    /* In this branch asynchronous changes to *gc_id are possible.	*/
    gc_id -> stack_base = 0;
    gc_id -> id = 0;
#   ifdef CYGWIN32
      gc_id -> pthread_id = 0;
#   endif /* CYGWIN32 */
#   ifdef GC_WIN32_PTHREADS
      gc_id -> pthread_id.p = NULL;
#   endif /* GC_WIN32_PTHREADS */
    AO_store_release(&(gc_id->in_use), FALSE);
  } else {
    /* Cast away volatile qualifier, since we have lock. */
    GC_thread gc_nvid = (GC_thread)gc_id;
    DWORD id = gc_nvid -> id;
    word hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    GC_ASSERT(I_HOLD_LOCK());
    while (p != gc_nvid) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
  }
}
Example #26
/* This call must be made from the new thread.  */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
        GC_ASSERT((word)&GC_thread_key % sizeof(word) == 0);
        if (0 != GC_key_create(&GC_thread_key, reset_thread_key)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < TINY_FREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (void *)(word)1;
        p -> normal_freelists[i] = (void *)(word)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (void *)(word)1;
#       endif
#       ifdef ENABLE_DISCLAIM
          p -> finalized_freelists[i] = (void *)(word)1;
#       endif
    }
    /* Set up the size 0 free lists.    */
    /* We now handle most of them like regular free lists, to ensure    */
    /* that explicit deallocation works.  However, allocation of a      */
    /* size 0 "gcj" object is always an error.                          */
    p -> ptrfree_freelists[0] = (void *)(word)1;
    p -> normal_freelists[0] = (void *)(word)1;
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = ERROR_FL;
#   endif
#   ifdef ENABLE_DISCLAIM
        p -> finalized_freelists[0] = (void *)(word)1;
#   endif
}
Example #27
/* Also used (for assertion checking only) from thread_local_alloc.c.	*/
GC_thread GC_lookup_thread_inner(DWORD thread_id) {
  if (GC_win32_dll_threads) {
    int i;
    LONG my_max = GC_get_max_thread_index();
    for (i = 0;
       i <= my_max &&
       (!AO_load_acquire(&(dll_thread_table[i].in_use))
	|| dll_thread_table[i].id != thread_id);
       /* Must still be in_use, since nobody else can store our thread_id. */
       i++) {}
    if (i > my_max) {
      return 0;
    } else {
      return (GC_thread)(dll_thread_table + i);
    }
  } else {
    word hv = ((word)thread_id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    
    GC_ASSERT(I_HOLD_LOCK());
    while (p != 0 && p -> id != thread_id) p = p -> next;
    return(p);
  }
}
Example #28
GC_INNER void *GC_store_debug_info_inner(void *p, word sz GC_ATTR_UNUSED,
                                         const char *string, int linenum)
{
    word * result = (word *)((oh *)p + 1);

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT(GC_size(p) >= sizeof(oh) + sz);
    GC_ASSERT(!(SMALL_OBJ(sz) && CROSSES_HBLK((ptr_t)p, sz)));
#   ifdef KEEP_BACK_PTRS
      ((oh *)p) -> oh_back_ptr = HIDE_BACK_PTR(NOT_MARKED);
#   endif
#   ifdef MAKE_BACK_GRAPH
      ((oh *)p) -> oh_bg_ptr = HIDE_BACK_PTR((ptr_t)0);
#   endif
    ((oh *)p) -> oh_string = string;
    ((oh *)p) -> oh_int = (word)linenum;
#   ifndef SHORT_DBG_HDRS
      ((oh *)p) -> oh_sz = sz;
      ((oh *)p) -> oh_sf = START_FLAG ^ (word)result;
      ((word *)p)[BYTES_TO_WORDS(GC_size(p))-1] =
         result[SIMPLE_ROUNDED_UP_WORDS(sz)] = END_FLAG ^ (word)result;
#   endif
    return result;
}
Example #29
  GC_INNER void GC_process_togglerefs(void)
  {
    int i;
    int new_size = 0;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }

    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
  }
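The toggleref pass is a classic in-place filter: read index i walks every slot, write index new_size receives only the survivors, and the dropped tail is zeroed afterwards so no stale (possibly hidden) references linger. The same shape in miniature:

#include <string.h>

/* Keep only the elements pred() accepts, preserving order; returns the
   new length and scrubs the dropped tail, as GC_process_togglerefs does. */
static int compact(int *arr, int len, int (*pred)(int))
{
    int i, new_len = 0;

    for (i = 0; i < len; i++) {
        if (pred(arr[i]))
            arr[new_len++] = arr[i];
    }
    memset(arr + new_len, 0, (size_t)(len - new_len) * sizeof(int));
    return new_len;
}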
Example #30
  /* this function is called repeatedly by GC_register_map_entries.     */
  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)b % sizeof(word) == 0 && (word)e % sizeof(word) == 0);
    for (i = 0; i < n_root_sets; i++) {
      ptr_t r_start, r_end;

      if (GC_static_roots[i].r_tmp) {
        /* The remaining roots are skipped as they are all temporary. */
#       ifdef GC_ASSERTIONS
          int j;
          for (j = i + 1; j < n_root_sets; j++) {
            GC_ASSERT(GC_static_roots[j].r_tmp);
          }
#       endif
        break;
      }
      r_start = GC_static_roots[i].r_start;
      r_end = GC_static_roots[i].r_end;
      if (!EXPECT((word)e <= (word)r_start || (word)r_end <= (word)b, TRUE)) {
#       ifdef DEBUG_ADD_DEL_ROOTS
          GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n",
                        (void *)b, (void *)e,
                        i, (void *)r_start, (void *)r_end);
#       endif
        if ((word)r_start < (word)b) {
          GC_root_size -= r_end - b;
          GC_static_roots[i].r_end = b;
          /* No need to rebuild as hash does not use r_end value. */
          if ((word)e < (word)r_end) {
            int j;

            if (rebuild) {
              GC_rebuild_root_index();
              rebuild = FALSE;
            }
            GC_add_roots_inner(e, r_end, FALSE); /* updates n_root_sets */
            for (j = i + 1; j < n_root_sets; j++)
              if (GC_static_roots[j].r_tmp)
                break;
            if (j < n_root_sets-1 && !GC_static_roots[n_root_sets-1].r_tmp) {
              /* Exchange the roots to have all temporary ones at the end. */
              ptr_t tmp_r_start = GC_static_roots[j].r_start;
              ptr_t tmp_r_end = GC_static_roots[j].r_end;

              GC_static_roots[j].r_start =
                                GC_static_roots[n_root_sets-1].r_start;
              GC_static_roots[j].r_end = GC_static_roots[n_root_sets-1].r_end;
              GC_static_roots[j].r_tmp = FALSE;
              GC_static_roots[n_root_sets-1].r_start = tmp_r_start;
              GC_static_roots[n_root_sets-1].r_end = tmp_r_end;
              GC_static_roots[n_root_sets-1].r_tmp = TRUE;
              rebuild = TRUE;
            }
          }
        } else {
          if ((word)e < (word)r_end) {
            GC_root_size -= e - r_start;
            GC_static_roots[i].r_start = e;
          } else {
            GC_remove_root_at_pos(i);
            i--;
          }
          rebuild = TRUE;
        }
      }
    }
    if (rebuild)
      GC_rebuild_root_index();
  }
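Removing [b, e) from a recorded root interval has three cases: trim the right end (re-adding the remainder when the removal is strictly interior, which splits the interval in two), trim the left end, or drop the interval entirely. The case analysis in isolation; add_interval is a stub standing in for GC_add_roots_inner, and pointer comparisons go through uintptr_t as the original goes through (word) casts:

#include <stddef.h>
#include <stdint.h>

struct interval { char *start, *end; };

/* Stub standing in for GC_add_roots_inner. */
static void add_interval(char *start, char *end) { (void)start; (void)end; }

/* Remove [b, e) from *iv; returns 0 iff the interval became empty. */
static int interval_subtract(struct interval *iv, char *b, char *e)
{
    if ((uintptr_t)e <= (uintptr_t)iv->start
        || (uintptr_t)iv->end <= (uintptr_t)b)
        return 1;                       /* disjoint: nothing to remove */
    if ((uintptr_t)iv->start < (uintptr_t)b) {
        char *old_end = iv->end;

        iv->end = b;                    /* trim on the right */
        if ((uintptr_t)e < (uintptr_t)old_end)
            add_interval(e, old_end);   /* strict interior: split */
        return 1;
    }
    if ((uintptr_t)e < (uintptr_t)iv->end) {
        iv->start = e;                  /* trim on the left */
        return 1;
    }
    return 0;                           /* fully covered: drop it */
}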