Example #1
/* Print all objects on the list.  Clear the list.      */
STATIC void GC_print_all_smashed_proc(void)
{
    unsigned i;

    GC_ASSERT(I_DONT_HOLD_LOCK());
    if (GC_n_smashed == 0) return;
    GC_err_printf("GC_check_heap_block: found %u smashed heap objects:\n",
                  GC_n_smashed);
    for (i = 0; i < GC_n_smashed; ++i) {
        ptr_t base = (ptr_t)GC_base(GC_smashed[i]);

#       ifdef LINT2
          if (!base) ABORT("Invalid GC_smashed element");
#       endif
        GC_print_smashed_obj("", base + sizeof(oh), GC_smashed[i]);
        GC_smashed[i] = 0;
    }
    GC_n_smashed = 0;
}
Example #2
void GC_stop_world(void)
{
  DWORD thread_id = GetCurrentThreadId();
  int i;

  if (!GC_thr_initialized) ABORT("GC_stop_world() called before GC_thr_init()");
  GC_ASSERT(I_HOLD_LOCK());

  GC_please_stop = TRUE;
# ifndef CYGWIN32
    EnterCriticalSection(&GC_write_cs);
# endif
  if (GC_win32_dll_threads) {
    /* Any threads being created during this loop will end up setting   */
    /* GC_attached_thread when they start.  This will force marking to  */
    /* restart.								*/
    /* This is not ideal, but hopefully correct.			*/
    GC_attached_thread = FALSE;
    for (i = 0; i <= GC_get_max_thread_index(); i++) {
      GC_vthread t = dll_thread_table + i;
      if (t -> stack_base != 0
	  && t -> id != thread_id) {
	  GC_suspend((GC_thread)t);
      }
    }
  } else {
      GC_thread t;
      int i;

      for (i = 0; i < THREAD_TABLE_SZ; i++) {
        for (t = GC_threads[i]; t != 0; t = t -> next) {
	  if (t -> stack_base != 0
	  && !KNOWN_FINISHED(t)
	  && t -> id != thread_id) {
	    GC_suspend(t);
	  }
	}
      }
  }
# ifndef CYGWIN32
    LeaveCriticalSection(&GC_write_cs);
# endif    
}
Example #3
/* Ensure that p has a back_edges structure associated with it.	*/
static void ensure_struct(ptr_t p)
{
  ptr_t old_back_ptr = GET_OH_BG_PTR(p);

  if (!((word)old_back_ptr & FLAG_MANY)) {
    back_edges *be = new_back_edges();
    be -> flags = 0;
    if (0 == old_back_ptr) {
      be -> n_edges = 0;
    } else {
      be -> n_edges = 1;
      be -> edges[0] = old_back_ptr;
    }
    be -> height = HEIGHT_UNKNOWN;
    be -> height_gc_no = GC_gc_no - 1;
    GC_ASSERT(be >= back_edge_space);
    SET_OH_BG_PTR(p, (word)be | FLAG_MANY);
  }
}
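
A note on the tag bit: the FLAG_MANY test above works because allocations are word-aligned, so the low bit of the stored back pointer is free to serve as a flag. A minimal sketch of that encoding, with hypothetical helper names:

#include <stdint.h>

#define FLAG_MANY 0x1   /* low tag bit; valid only for word-aligned pointers */

static void *tag_many(void *be)   { return (void *)((uintptr_t)be | FLAG_MANY); }
static int  has_many(void *ptr)   { return ((uintptr_t)ptr & FLAG_MANY) != 0; }
static void *strip_tag(void *ptr) { return (void *)((uintptr_t)ptr & ~(uintptr_t)FLAG_MANY); }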
Example #4
  GC_INNER void * GC_debug_generic_malloc_inner_ignore_off_page(size_t lb,
                                                                int k)
  {
    void * result;

    GC_ASSERT(I_HOLD_LOCK());
    result = GC_generic_malloc_inner_ignore_off_page(
                                SIZET_SAT_ADD(lb, DEBUG_BYTES), k);
    if (NULL == result) {
        GC_err_printf("GC internal allocation (%lu bytes) returning NULL\n",
                       (unsigned long) lb);
        return(0);
    }
    if (!GC_debugging_started) {
        GC_start_debugging_inner();
    }
    ADD_CALL_CHAIN(result, GC_RETURN_ADDR);
    return (GC_store_debug_info_inner(result, (word)lb, "INTERNAL", 0));
  }
Example #5
/* Print a human-readable description of the object to stderr. p points */
/* to somewhere inside an object with the debugging info.               */
STATIC void GC_print_obj(ptr_t p)
{
    oh * ohdr = (oh *)GC_base(p);

    GC_ASSERT(I_DONT_HOLD_LOCK());
#   ifdef LINT2
      if (!ohdr) ABORT("Invalid GC_print_obj argument");
#   endif
    GC_err_printf("%p (", ((ptr_t)ohdr + sizeof(oh)));
    GC_err_puts(ohdr -> oh_string);
#   ifdef SHORT_DBG_HDRS
      GC_err_printf(":%d, ", GET_OH_LINENUM(ohdr));
#   else
      GC_err_printf(":%d, sz=%lu, ",
                    GET_OH_LINENUM(ohdr), (unsigned long)(ohdr -> oh_sz));
#   endif
    GC_print_type((ptr_t)(ohdr + 1));
    GC_err_puts(")\n");
    PRINT_CALL_CHAIN(ohdr);
}
Example #6
void GC_notify_or_invoke_finalizers(void)
{
    /* This is a convenient place to generate backtraces if appropriate, */
    /* since that code is not callable with the allocation lock.	 */
#   ifdef KEEP_BACK_PTRS
      if (GC_backtraces > 0) {
	static word last_back_trace_gc_no = 3;	/* Skip early ones. */
	long i;

	LOCK();
	if (GC_gc_no > last_back_trace_gc_no) {
	  /* Stops when GC_gc_no wraps; that's OK.	*/
	    last_back_trace_gc_no = (word)(-1);  /* disable others. */
	    for (i = 0; i < GC_backtraces; ++i) {
	      /* FIXME: This tolerates concurrent heap mutation,	*/
	      /* which may cause occasional mysterious results.		*/
	      /* We need to release the GC lock, since GC_print_callers	*/
	      /* acquires it.  It probably shouldn't.			*/
	      UNLOCK();
	      GC_generate_random_backtrace_no_gc();
	      LOCK();
	    }
	    last_back_trace_gc_no = GC_gc_no;
	}
	UNLOCK();
      }
#   endif
    if (GC_finalize_now == 0) return;
    if (!GC_finalize_on_demand) {
	(void) GC_invoke_finalizers();
#	ifndef THREADS
	  GC_ASSERT(GC_finalize_now == 0);
#	endif	/* Otherwise GC can run concurrently and add more */
	return;
    }
    if (GC_finalizer_notifier != (void (*)(void))0
	&& last_finalizer_notification != GC_gc_no) {
	last_finalizer_notification = GC_gc_no;
	GC_finalizer_notifier();
    }
}
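
From the client side, the notifier branch above is reached only when finalization is deferred. A hedged usage sketch, assuming a libgc version that provides GC_set_finalize_on_demand, GC_set_finalizer_notifier, and GC_should_invoke_finalizers:

#include <gc.h>
#include <stdio.h>

static void notifier(void)
{
    /* Called without the allocation lock when finalizers are queued. */
    printf("finalizers pending\n");
}

int main(void)
{
    GC_INIT();
    GC_set_finalize_on_demand(1);        /* queue finalizers, don't run them */
    GC_set_finalizer_notifier(notifier);
    /* ... allocate objects and register finalizers ... */
    while (GC_should_invoke_finalizers())
        GC_invoke_finalizers();          /* run them at a point we choose */
    return 0;
}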
Example #7
void GC_wait_for_gc_completion(GC_bool wait_for_all)
{
    GC_ASSERT(I_HOLD_LOCK());
    if (GC_incremental && GC_collection_in_progress()) {
	word old_gc_no = GC_gc_no;

	/* Make sure that no part of our stack is still on the mark stack, */
	/* since it's about to be unmapped.				   */
	while (GC_incremental && GC_collection_in_progress()
	       && (wait_for_all || old_gc_no == GC_gc_no)) {
	    ENTER_GC();
	    GC_in_thread_creation = TRUE;
            GC_collect_a_little_inner(1);
	    GC_in_thread_creation = FALSE;
	    EXIT_GC();
	    UNLOCK();
	    sched_yield();
	    LOCK();
	}
    }
}
Example #8
void GC_print_smashed_obj(ptr_t p, ptr_t clobbered_addr)
{
    register oh * ohdr = (oh *)GC_base(p);
    
    GC_ASSERT(I_DONT_HOLD_LOCK());
    GC_err_printf("%p in object at %p(", clobbered_addr, p);
    if (clobbered_addr <= (ptr_t)(&(ohdr -> oh_sz))
        || ohdr -> oh_string == 0) {
        GC_err_printf("<smashed>, appr. sz = %ld)\n",
        	       (GC_size((ptr_t)ohdr) - DEBUG_BYTES));
    } else {
        if (ohdr -> oh_string[0] == '\0') {
            GC_err_puts("EMPTY(smashed?)");
        } else {
            GC_err_puts(ohdr -> oh_string);
        }
        GC_err_printf(":%ld, sz=%ld)\n", (unsigned long)(ohdr -> oh_int),
        			          (unsigned long)(ohdr -> oh_sz));
        PRINT_CALL_CHAIN(ohdr);
    }
}
Example #9
void GC_print_back_graph_stats(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_printf("Maximum backwards height of reachable objects at GC %lu is %lu\n",
            (unsigned long) GC_gc_no, (unsigned long)GC_max_height);
  if (GC_max_height > GC_max_max_height) {
    ptr_t obj = GC_deepest_obj;

    GC_max_max_height = GC_max_height;
    UNLOCK();
    GC_err_printf(
            "The following unreachable object is last in a longest chain "
            "of unreachable objects:\n");
    GC_print_heap_obj(obj);
    LOCK();
  }
  GC_COND_LOG_PRINTF("Needed max total of %d back-edge structs\n",
                     GC_n_back_edge_structs);
  GC_apply_to_each_object(reset_back_edge);
  GC_deepest_obj = 0;
}
Example #10
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)			*/
void GC_delete_thread(pthread_t id)
{
    int hv = NUMERIC_THREAD_ID(id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;
    
    GC_ASSERT(I_HOLD_LOCK());
    while (!THREAD_EQUAL(p -> id, id)) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
#   ifdef GC_DARWIN_THREADS
	mach_port_deallocate(mach_task_self(), p->stop_info.mach_thread);
#   endif
    GC_INTERNAL_FREE(p);
}
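
The loop above is the standard deletion from a singly linked hash chain; GC_delete_thread merely omits the not-found check because the entry is known to be present. The generic pattern:

#include <stdlib.h>

struct node { int key; struct node *next; };

static void delete_node(struct node **head, int key)
{
    struct node *p = *head, *prev = NULL;

    while (p != NULL && p->key != key) {
        prev = p;
        p = p->next;
    }
    if (p == NULL) return;       /* not found; GC_delete_thread would trap */
    if (prev == NULL)
        *head = p->next;         /* removing the first element */
    else
        prev->next = p->next;
    free(p);
}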
Example #11
#ifdef THREAD_LOCAL_ALLOC
  void * GC_core_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
#else
  void * GC_gcj_malloc(size_t lb, void * ptr_to_struct_containing_descr)
#endif
{
    ptr_t op;
    ptr_t * opp;
    word lg;
    DCL_LOCK_STATE;

    if(SMALL_OBJ(lb)) {
	lg = GC_size_map[lb];
	opp = &(GC_gcjobjfreelist[lg]);
	LOCK();
	op = *opp;
        if(EXPECT(op == 0, 0)) {
	    maybe_finalize();
            op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
	    if (0 == op) {
		UNLOCK();
		return(GC_oom_fn(lb));
	    }
        } else {
            *opp = obj_link(op);
            GC_bytes_allocd += GRANULES_TO_BYTES(lg);
        }
	*(void **)op = ptr_to_struct_containing_descr;
	GC_ASSERT(((void **)op)[1] == 0);
	UNLOCK();
    } else {
	LOCK();
	maybe_finalize();
	op = (ptr_t)GENERAL_MALLOC((word)lb, GC_gcj_kind);
	if (0 == op) {
	    UNLOCK();
	    return(GC_oom_fn(lb));
	}
	*(void **)op = ptr_to_struct_containing_descr;
	UNLOCK();
    }
    return((void *) op);
}
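
The fast path above is a free-list pop under the lock: obj_link reads the first word of a free object as the pointer to the next free object. Schematically:

/* Sketch: each free object stores the next free object in its first word. */
static void *freelist_pop(void **head)
{
    void *op = *head;

    if (op != NULL)
        *head = *(void **)op;    /* the equivalent of *opp = obj_link(op) */
    return op;
}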
Example #12
/* Double the size of a hash table. *log_size_ptr is the log of its     */
/* current size.  May be a no-op.                                       */
/* *table is a pointer to an array of hash headers.  If we succeed, we  */
/* update both *table and *log_size_ptr.  Lock is held.                 */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr)
{
    register word i;
    register struct hash_chain_entry *p;
    signed_word log_old_size = *log_size_ptr;
    signed_word log_new_size = log_old_size + 1;
    word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    new_table = (struct hash_chain_entry **)
                    GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        (size_t)new_size * sizeof(struct hash_chain_entry *),
                        NORMAL);
    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}
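
The same doubling-and-rehash scheme, restated as a self-contained sketch over a plain chained table, with a power-of-two mask standing in for HASH3:

#include <stdlib.h>

struct entry { unsigned key; struct entry *next; };

static int grow(struct entry ***table_p, unsigned *size_p)
{
    unsigned old_size = *size_p, new_size = old_size != 0 ? 2 * old_size : 4;
    struct entry **nt = calloc(new_size, sizeof *nt);
    unsigned i;

    if (nt == NULL)
        return -1;               /* keep the old table on failure, as above */
    for (i = 0; i < old_size; i++) {
        struct entry *p = (*table_p)[i];

        while (p != NULL) {      /* relink every entry into its new bucket */
            struct entry *next = p->next;
            unsigned h = p->key & (new_size - 1);

            p->next = nt[h];
            nt[h] = p;
            p = next;
        }
    }
    free(*table_p);
    *table_p = nt;
    *size_p = new_size;
    return 0;
}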
Example #13
/* Find the largest stack_base smaller than bound.  May be used	*/
/* to find the boundary between a register stack and adjacent		*/
/* immediately preceding memory stack.				*/
ptr_t GC_greatest_stack_base_below(ptr_t bound)
{
    int i;
    GC_thread p;
    ptr_t result = 0;
    
    GC_ASSERT(I_HOLD_LOCK());
#   ifdef PARALLEL_MARK
      for (i = 0; i < GC_markers; ++i) {
	if (marker_sp[i] > result && marker_sp[i] < bound)
	  result = marker_sp[i];
      }
#   endif
    for (i = 0; i < THREAD_TABLE_SZ; i++) {
      for (p = GC_threads[i]; p != 0; p = p -> next) {
	if (p -> stack_end > result && p -> stack_end < bound) {
	  result = p -> stack_end;
	}
      }
    }
    return result;
}
Example #14
/* Note that even the slow path doesn't lock.	*/
void *  PREFIXED(slow_getspecific) (tsd * key, unsigned long qtid,
				    tse * volatile * cache_ptr) {
    pthread_t self = pthread_self();
    unsigned hash_val = HASH(self);
    tse *entry = key -> hash[hash_val];

    GC_ASSERT(qtid != INVALID_QTID);
    while (entry != NULL && entry -> thread != self) {
	entry = entry -> next;
    } 
    if (entry == NULL) return NULL;
    /* Set cache_entry.		*/
        entry -> qtid = qtid;
		/* It's safe to do this asynchronously.  Either value 	*/
		/* is safe, though may produce spurious misses.		*/
		/* We're replacing one qtid with another one for the	*/
		/* same thread.						*/
	*cache_ptr = entry;
		/* Again this is safe since pointer assignments are 	*/
		/* presumed atomic, and either pointer is valid.	*/
    return entry -> value;
}
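
The point of updating *cache_ptr is to make the common case a single comparison. A hedged sketch of the fast path that would sit in front of this slow one, reusing the tsd/tse types and PREFIXED macro from the example:

static void *quick_getspecific(tsd *key, unsigned long qtid,
                               tse * volatile *cache_ptr)
{
    tse *entry = *cache_ptr;   /* racy read is fine: any cached entry is valid */

    if (entry != NULL && entry->qtid == qtid)
        return entry->value;   /* cache hit: no lock, no hash-chain walk */
    return PREFIXED(slow_getspecific)(key, qtid, cache_ptr);
}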
Example #15
/* Delete a thread from GC_threads.  We assume it is there.	*/
/* (The code intentionally traps if it wasn't.)  Assumes we	*/
/* hold the allocation lock unless GC_win32_dll_threads is set.	*/
/* If GC_win32_dll_threads is set it should be called from the	*/
/* thread being deleted.					*/
void GC_delete_gc_thread(GC_vthread gc_id)
{
  CloseHandle(gc_id->handle);
  if (GC_win32_dll_threads) {
    /* This is intended to be lock-free.				*/
    /* It is either called synchronously from the thread being deleted,	*/
    /* or by the joining thread.					*/
    /* In this branch asynchronous changes to *gc_id are possible.	*/
    gc_id -> stack_base = 0;
    gc_id -> id = 0;
#   ifdef CYGWIN32
      gc_id -> pthread_id = 0;
#   endif /* CYGWIN32 */
#   ifdef GC_WIN32_PTHREADS
      gc_id -> pthread_id.p = NULL;
#   endif /* GC_WIN32_PTHREADS */
    AO_store_release(&(gc_id->in_use), FALSE);
  } else {
    /* Cast away volatile qualifier, since we have lock. */
    GC_thread gc_nvid = (GC_thread)gc_id;
    DWORD id = gc_nvid -> id;
    word hv = ((word)id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    register GC_thread prev = 0;

    GC_ASSERT(I_HOLD_LOCK());
    while (p != gc_nvid) {
        prev = p;
        p = p -> next;
    }
    if (prev == 0) {
        GC_threads[hv] = p -> next;
    } else {
        prev -> next = p -> next;
    }
    GC_INTERNAL_FREE(p);
  }
}
Example #16
/* Print a type description for the object whose client-visible address */
/* is p.                                                                */
STATIC void GC_print_type(ptr_t p)
{
    hdr * hhdr = GC_find_header(p);
    char buffer[GC_TYPE_DESCR_LEN + 1];
    int kind = hhdr -> hb_obj_kind;

    if (0 != GC_describe_type_fns[kind] && GC_is_marked(GC_base(p))) {
        /* This should preclude free list objects except with   */
        /* thread-local allocation.                             */
        buffer[GC_TYPE_DESCR_LEN] = 0;
        (GC_describe_type_fns[kind])(p, buffer);
        GC_ASSERT(buffer[GC_TYPE_DESCR_LEN] == 0);
        GC_err_puts(buffer);
    } else {
        switch(kind) {
          case PTRFREE:
            GC_err_puts("PTRFREE");
            break;
          case NORMAL:
            GC_err_puts("NORMAL");
            break;
          case UNCOLLECTABLE:
            GC_err_puts("UNCOLLECTABLE");
            break;
#         ifdef ATOMIC_UNCOLLECTABLE
            case AUNCOLLECTABLE:
              GC_err_puts("ATOMIC UNCOLLECTABLE");
              break;
#         endif
          case STUBBORN:
            GC_err_puts("STUBBORN");
            break;
          default:
            GC_err_printf("kind=%d descr=0x%lx", kind,
                          (unsigned long)(hhdr -> hb_descr));
        }
    }
}
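
The describe function consulted above can be supplied by the client. A sketch, assuming the GC_register_describe_type_fn interface and GC_TYPE_DESCR_LEN constant from gc_mark.h:

#include <string.h>
#include "gc_mark.h"

static int my_kind;   /* hypothetical kind, e.g. obtained via GC_new_kind() */

static void describe_mine(void *p, char *out_buf)
{
    (void)p;
    /* May write at most GC_TYPE_DESCR_LEN characters; the caller */
    /* terminates the buffer, as GC_print_type does above.        */
    strncpy(out_buf, "my_kind object", GC_TYPE_DESCR_LEN);
}

static void install_describe_fn(void)
{
    GC_register_describe_type_fn(my_kind, describe_mine);
}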
Example #17
/* This call must be made from the new thread.  */
GC_INNER void GC_init_thread_local(GC_tlfs p)
{
    int i;

    GC_ASSERT(I_HOLD_LOCK());
    if (!EXPECT(keys_initialized, TRUE)) {
        if (0 != GC_key_create(&GC_thread_key, 0)) {
            ABORT("Failed to create key for local allocator");
        }
        keys_initialized = TRUE;
    }
    if (0 != GC_setspecific(GC_thread_key, p)) {
        ABORT("Failed to set thread specific allocation pointers");
    }
    for (i = 1; i < TINY_FREELISTS; ++i) {
        p -> ptrfree_freelists[i] = (void *)(word)1;
        p -> normal_freelists[i] = (void *)(word)1;
#       ifdef GC_GCJ_SUPPORT
          p -> gcj_freelists[i] = (void *)(word)1;
#       endif
#       ifdef ENABLE_DISCLAIM
          p -> finalized_freelists[i] = (void *)(word)1;
#       endif
    }
    /* Set up the size 0 free lists.    */
    /* We now handle most of them like regular free lists, to ensure    */
    /* that explicit deallocation works.  However, allocation of a      */
    /* size 0 "gcj" object is always an error.                          */
    p -> ptrfree_freelists[0] = (void *)(word)1;
    p -> normal_freelists[0] = (void *)(word)1;
#   ifdef GC_GCJ_SUPPORT
        p -> gcj_freelists[0] = ERROR_FL;
#   endif
#   ifdef ENABLE_DISCLAIM
        p -> finalized_freelists[0] = (void *)(word)1;
#   endif
}
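
The (void *)1 stores are not pointers: values that small act as sentinels meaning "list empty; count allocations until a real list is worth building". A hedged sketch of how an allocator might branch on that convention (the threshold and fallback names are hypothetical):

#include <stdint.h>

#define GRANULE_COUNT_LIMIT 30      /* hypothetical: below this, the word is a count */
void *slow_path_alloc(void **fl);   /* hypothetical fallback allocator */

static void *tiny_alloc(void **fl)
{
    void *head = *fl;

    if ((uintptr_t)head > GRANULE_COUNT_LIMIT) {
        *fl = *(void **)head;             /* real pointer: pop the free list */
        return head;
    }
    *fl = (void *)((uintptr_t)head + 1);  /* still counting: bump the sentinel */
    return slow_path_alloc(fl);           /* may install a real list */
}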
Example #18
  GC_INNER void GC_process_togglerefs(void)
  {
    int i;
    int new_size = 0;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }

    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
  }
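
The low-bit test works because GC_HIDE_POINTER stores the bitwise complement of a word-aligned pointer: the result has its low bits set, does not look like a pointer, and is therefore invisible to the conservative scan, which is exactly what makes the reference weak. In essence:

#include <stdint.h>

#define HIDE(p)      (~(uintptr_t)(p))            /* stored as a non-pointer */
#define REVEAL(h)    ((void *)~(uintptr_t)(h))
#define IS_HIDDEN(w) (((uintptr_t)(w) & 1) != 0)  /* the test used above */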
Example #19
/* Also used (for assertion checking only) from thread_local_alloc.c.	*/
GC_thread GC_lookup_thread_inner(DWORD thread_id) {
  if (GC_win32_dll_threads) {
    int i;
    LONG my_max = GC_get_max_thread_index();
    for (i = 0;
       i <= my_max &&
       (!AO_load_acquire(&(dll_thread_table[i].in_use))
	|| dll_thread_table[i].id != thread_id);
       /* Must still be in_use, since nobody else can store our thread_id. */
       i++) {}
    if (i > my_max) {
      return 0;
    } else {
      return (GC_thread)(dll_thread_table + i);
    }
  } else {
    word hv = ((word)thread_id) % THREAD_TABLE_SZ;
    register GC_thread p = GC_threads[hv];
    
    GC_ASSERT(I_HOLD_LOCK());
    while (p != 0 && p -> id != thread_id) p = p -> next;
    return(p);
  }
}
Example #20
  /* Remove given range from every static root which intersects with    */
  /* the range.  It is assumed GC_remove_tmp_roots is called before     */
  /* this function is called repeatedly by GC_register_map_entries.     */
  GC_INNER void GC_remove_roots_subregion(ptr_t b, ptr_t e)
  {
    int i;
    GC_bool rebuild = FALSE;

    GC_ASSERT(I_HOLD_LOCK());
    GC_ASSERT((word)b % sizeof(word) == 0 && (word)e % sizeof(word) == 0);
    for (i = 0; i < n_root_sets; i++) {
      ptr_t r_start, r_end;

      if (GC_static_roots[i].r_tmp) {
        /* The remaining roots are skipped as they are all temporary. */
#       ifdef GC_ASSERTIONS
          int j;
          for (j = i + 1; j < n_root_sets; j++) {
            GC_ASSERT(GC_static_roots[j].r_tmp);
          }
#       endif
        break;
      }
      r_start = GC_static_roots[i].r_start;
      r_end = GC_static_roots[i].r_end;
      if (!EXPECT((word)e <= (word)r_start || (word)r_end <= (word)b, TRUE)) {
#       ifdef DEBUG_ADD_DEL_ROOTS
          GC_log_printf("Removing %p .. %p from root section %d (%p .. %p)\n",
                        (void *)b, (void *)e,
                        i, (void *)r_start, (void *)r_end);
#       endif
        if ((word)r_start < (word)b) {
          GC_root_size -= r_end - b;
          GC_static_roots[i].r_end = b;
          /* No need to rebuild as hash does not use r_end value. */
          if ((word)e < (word)r_end) {
            int j;

            if (rebuild) {
              GC_rebuild_root_index();
              rebuild = FALSE;
            }
            GC_add_roots_inner(e, r_end, FALSE); /* updates n_root_sets */
            for (j = i + 1; j < n_root_sets; j++)
              if (GC_static_roots[j].r_tmp)
                break;
            if (j < n_root_sets-1 && !GC_static_roots[n_root_sets-1].r_tmp) {
              /* Exchange the roots to have all temporary ones at the end. */
              ptr_t tmp_r_start = GC_static_roots[j].r_start;
              ptr_t tmp_r_end = GC_static_roots[j].r_end;

              GC_static_roots[j].r_start =
                                GC_static_roots[n_root_sets-1].r_start;
              GC_static_roots[j].r_end = GC_static_roots[n_root_sets-1].r_end;
              GC_static_roots[j].r_tmp = FALSE;
              GC_static_roots[n_root_sets-1].r_start = tmp_r_start;
              GC_static_roots[n_root_sets-1].r_end = tmp_r_end;
              GC_static_roots[n_root_sets-1].r_tmp = TRUE;
              rebuild = TRUE;
            }
          }
        } else {
          if ((word)e < (word)r_end) {
            GC_root_size -= e - r_start;
            GC_static_roots[i].r_start = e;
          } else {
            GC_remove_root_at_pos(i);
            i--;
          }
          rebuild = TRUE;
        }
      }
    }
    if (rebuild)
      GC_rebuild_root_index();
  }
Example #21
/// The one difference between this one and the one above
/// is the addition of the flag clear.  This shouldn't really
/// matter, but I was getting a bit of weird behavior in the baseline
/// results (milc 100ms longer than previously measured) and wanted to
/// eliminate it
STATIC void HINTGC_reclaim_block(struct hblk *hbp, word report_if_found)
{
    hdr * hhdr = HDR(hbp);
    size_t sz = hhdr -> hb_sz;  /* size of objects in current block     */
    struct obj_kind * ok = &GC_obj_kinds[hhdr -> hb_obj_kind];
    struct hblk ** rlh;

    // This line added for hinted collection since we have to
    // visit every block header anyways.  Clear out the
    // pending free flags.
    hhdr->hb_flags &= ~HAS_PENDING_FREE;

    if( sz > MAXOBJBYTES ) {  /* 1 big object */
        if( !mark_bit_from_hdr(hhdr, 0) ) {
            if (report_if_found) {
                GC_add_leaked((ptr_t)hbp);
            } else {
                size_t blocks = OBJ_SZ_TO_BLOCKS(sz);
                if (blocks > 1) {
                    GC_large_allocd_bytes -= blocks * HBLKSIZE;
                }
                GC_bytes_found += sz;
                GC_freehblk(hbp);
            }
        } else {
            if (hhdr -> hb_descr != 0) {
                GC_composite_in_use += sz;
            } else {
                GC_atomic_in_use += sz;
            }
        }
    } else {
        GC_bool empty = GC_block_empty(hhdr);
#       ifdef PARALLEL_MARK
        /* Count can be low or one too high because we sometimes      */
        /* have to ignore decrements.  Objects can also potentially   */
        /* be repeatedly marked by each marker.                       */
        /* Here we assume two markers, but this is extremely          */
        /* unlikely to fail spuriously with more.  And if it does, it */
        /* should be looked at.                                       */
        GC_ASSERT(hhdr -> hb_n_marks <= 2 * (HBLKSIZE/sz + 1) + 16);
#       else
        GC_ASSERT(sz * hhdr -> hb_n_marks <= HBLKSIZE);
#       endif
        if (hhdr -> hb_descr != 0) {
            GC_composite_in_use += sz * hhdr -> hb_n_marks;
        } else {
            GC_atomic_in_use += sz * hhdr -> hb_n_marks;
        }
        if (report_if_found) {
            GC_reclaim_small_nonempty_block(hbp, TRUE /* report_if_found */);
        } else if (empty) {
            GC_bytes_found += HBLKSIZE;
            GC_freehblk(hbp);
        } else if (GC_find_leak || !GC_block_nearly_full(hhdr)) {
            /* group of smaller objects, enqueue the real work */
            rlh = &(ok -> ok_reclaim_list[BYTES_TO_GRANULES(sz)]);
            hhdr -> hb_next = *rlh;
            *rlh = hbp;
        } /* else not worth salvaging. */
        /* We used to do the nearly_full check later, but we    */
        /* already have the right cache context here.  Also     */
        /* doing it here avoids some silly lock contention in   */
        /* GC_malloc_many.                                      */
    }
}
Example #22
static int zero_weak_boxes(GCTYPE *gc, int is_late, int force_zero, int from_inc, int fuel)
{
  GC_Weak_Box *wb;
  int num_gen0;

  if (from_inc) {
    wb = gc->inc_weak_boxes[is_late];
    num_gen0 = 0;
  } else {
    wb = append_weak_boxes(gc->weak_boxes[is_late],
                           gc->bp_weak_boxes[is_late],
                           &num_gen0);
    if (gc->gc_full || !gc->started_incremental)
      num_gen0 = 0;
  }

  while (wb) {
    GC_ASSERT(is_marked(gc, wb));
    if (!wb->val) {
      /* nothing to do */
    } else if (force_zero || !is_marked(gc, wb->val)) {
      wb->val = NULL;
      if (wb->secondary_erase) {
        void **p;
        mpage *page;

        /* it's possible for the secondary to be in an old generation
           and therefore on an mprotected page: */
        page = pagemap_find_page(gc->page_maps, wb->secondary_erase);
        if (page->mprotected) {
          page->mprotected = 0;
          mmu_write_unprotect_page(gc->mmu, page->addr, APAGE_SIZE, page_mmu_type(page), &page->mmu_src_block);
          page->reprotect_next = gc->reprotect_next;
          gc->reprotect_next = page;
          page->reprotect = 1;
        }
        p = (void **)GC_resolve2(wb->secondary_erase, gc);
        *(p + wb->soffset) = NULL;
        wb->secondary_erase = NULL;
      }
    } else {
      wb->val = GC_resolve2(wb->val, gc);
    }
    if (num_gen0 > 0) {
      if (!is_in_generation_half(gc, wb)) {
        /* For incremental mode, preserve this weak box
           in the incremental list for re-checking later. */
        check_weak_box_not_already_in_inc_chain(wb, gc->inc_weak_boxes[wb->is_late]);
        wb->inc_next = gc->inc_weak_boxes[is_late];
        gc->inc_weak_boxes[is_late] = wb;
      }
    }
    if (from_inc) {
      GC_Weak_Box *next;
      next = wb->inc_next;
      wb->inc_next = gc->weak_incremental_done;
      wb = next;
    } else
      wb = wb->next;

    num_gen0--;

    if (fuel >= 0) {
      if (fuel > 0)
        fuel--;
      else {
        GC_ASSERT(from_inc);
        gc->inc_weak_boxes[is_late] = wb;
        return 0;
      }
    }
  }

  /* reset, in case we have a second round */
  if (from_inc) {
    gc->inc_weak_boxes[is_late] = NULL;
  } else {
    gc->weak_boxes[is_late] = NULL;
    gc->bp_weak_boxes[is_late] = NULL;
  }

  return fuel;
}
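
The fuel argument is a generic bounded-work idiom: a negative value means "run to completion", a positive value caps how many items one call may process, and the leftover is returned so a later call can resume. Stripped to its essentials:

struct item { struct item *next; };
static void handle(struct item *it) { (void)it; /* per-item work */ }

/* Returns leftover fuel; 0 means "budget exhausted, resume from *work_list". */
static int process_some(struct item **work_list, int fuel)
{
  while (*work_list != NULL) {
    handle(*work_list);
    *work_list = (*work_list)->next;
    if (fuel > 0 && --fuel == 0)
      return 0;              /* out of fuel; caller retries later */
  }
  return fuel;               /* negative fuel means unlimited */
}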
Example #23
/* Called with world stopped.  Cause disappearing links to disappear,	*/
/* and invoke finalizers.						*/
void GC_finalize(void)
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    
  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers	*/
  /* from finalizable objects.						*/
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
	    GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still		*/
  /* unreachable.							*/
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
	    if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
	    }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.	*/
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will	*/
              /* see it.						*/
              curr_fo -> fo_hidden_base = 
              		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                 	curr_fo -> fo_object_size
              		+ sizeof(struct finalizable_object);
	    GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_finalize_now; 
  	 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
  	real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
  	if (!GC_is_marked(real_ptr)) {
  	    if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
  	        GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
  	    }
	    if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
		GC_set_mark_bit(real_ptr);
	    }
  	}
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_finalize_now;
        prev_fo = 0;
        while (curr_fo != 0) {
	  next_fo = fo_next(curr_fo);
	  if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
	    real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
	    if (!GC_is_marked(real_ptr)) {
	      GC_set_mark_bit(real_ptr);
	    } else {
	      if (prev_fo == 0)
		GC_finalize_now = next_fo;
	      else
		fo_set_next(prev_fo, next_fo);

              curr_fo -> fo_hidden_base =
              		(word) HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                 	curr_fo -> fo_object_size + sizeof(struct finalizable_object);

	      i = HASH2(real_ptr, log_fo_table_size);
	      fo_set_next (curr_fo, fo_head[i]);
	      GC_fo_entries++;
	      fo_head[i] = curr_fo;
	      curr_fo = prev_fo;
	    }
	  }
	  prev_fo = curr_fo;
	  curr_fo = next_fo;
        }
      }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}
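
The first loop implements the client-visible disappearing-link semantics: a registered *link is cleared when its target becomes unreachable. A usage sketch against the public API:

#include <gc.h>

struct holder { void *peer; };

static void demo(void)
{
    struct holder *h = GC_MALLOC(sizeof *h);
    void *target = GC_MALLOC(64);

    h->peer = target;
    GC_general_register_disappearing_link(&h->peer, target);
    /* Once target becomes unreachable and a collection runs, */
    /* h->peer reads as 0, cleared by the loop above.         */
}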
Example #24
/* finalized when this finalizer is invoked.			*/
GC_API void GC_register_finalizer_inner(void * obj,
					GC_finalization_proc fn, void *cd,
					GC_finalization_proc *ofn, void **ocd,
					finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
	LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
    	GC_grow_table((struct hash_chain_entry ***)(&fo_head),
    		      &log_fo_table_size);
	if (GC_print_stats) {
	    GC_log_printf("Grew fo table to %u entries\n",
	    	          (1 << log_fo_table_size));
	}
    }
    /* in the THREADS case signals are disabled and we hold allocation	*/
    /* lock; otherwise neither is true.  Proceed carefully.		*/
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this	*/
            /* should be safe.  The client may see only *ocd	*/
            /* updated, but we'll declare that to be his	*/
            /* problem.						*/
            if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
                if (prev_fo == 0) {
                  fo_head[index] = fo_next(curr_fo);
                } else {
                  fo_set_next(prev_fo, fo_next(curr_fo));
                }
            if (fn == 0) {
                GC_fo_entries--;
                  /* May not happen if we get a signal.  But a high	*/
                  /* estimate will only make the table larger than	*/
                  /* necessary.						*/
#		if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                  GC_free((void *)curr_fo);
#		endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
		/* Reinsert it.  We deleted it first to maintain	*/
		/* consistency in the event of a signal.		*/
		if (prev_fo == 0) {
                  fo_head[index] = curr_fo;
                } else {
                  fo_set_next(prev_fo, curr_fo);
                }
            }
#	    ifdef THREADS
                UNLOCK();
#	    endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#	ifdef THREADS
            UNLOCK();
#	endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
      /* We won't collect it, hence finalizer wouldn't be run. */
#     ifdef THREADS
          UNLOCK();
#     endif
      return;
    }
    new_fo = (struct finalizable_object *)
    	GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
    if (EXPECT(0 == new_fo, FALSE)) {
#     ifdef THREADS
	UNLOCK();
#     endif
      new_fo = (struct finalizable_object *)
	      GC_oom_fn(sizeof(struct finalizable_object));
      if (0 == new_fo) {
	GC_finalization_failures++;
	return;
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
	LOCK();
#     endif
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
#   endif
}
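
Clients reach this machinery through GC_register_finalizer and its variants, which pass the appropriate mark_proc. A minimal usage sketch:

#include <gc.h>
#include <stdio.h>

static void on_reclaim(void *obj, void *client_data)
{
    printf("finalizing %p (client_data=%p)\n", obj, client_data);
}

static void demo(void)
{
    void *p = GC_MALLOC(32);
    GC_finalization_proc old_fn;
    void *old_cd;

    /* old_fn/old_cd receive any previously registered finalizer. */
    GC_register_finalizer(p, on_reclaim, NULL, &old_fn, &old_cd);
}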
Example #25
/* Called with held lock (but the world is running).                    */
/* Cause disappearing links to disappear and unreachable objects to be  */
/* enqueued for finalization.                                           */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_fnlz_roots.finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }

  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
Example #26
/* Tmp specifies that the interval may be deleted before                */
/* re-registering dynamic libraries.                                    */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    struct roots * old;

    /* Adjust and check range boundaries for safety */
    GC_ASSERT((word)b % sizeof(word) == 0);
    e = (ptr_t)((word)e & ~(sizeof(word) - 1));
    GC_ASSERT(b <= e);
    if (b == e) return;  /* nothing to do? */

#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        register int i;
        old = 0; /* initialized to prevent warning. */
        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    GC_root_size += (old -> r_start - b);
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += (e - old -> r_end);
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
              other = GC_static_roots + i;
              b = other -> r_start;
              e = other -> r_end;
              if (b <= old -> r_end && e >= old -> r_start) {
                if (b < old -> r_start) {
                    GC_root_size += (old -> r_start - b);
                    old -> r_start = b;
                }
                if (e > old -> r_end) {
                    GC_root_size += (e - old -> r_end);
                    old -> r_end = e;
                }
                old -> r_tmp &= other -> r_tmp;
                /* Delete this entry. */
                  GC_root_size -= (other -> r_end - other -> r_start);
                  other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                  other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                  n_root_sets--;
              }
            }
          return;
        }
      }
#   else
      old = GC_roots_present(b);
      if (old != 0) {
        if (e <= old -> r_end) /* already there */ return;
        /* else extend */
        GC_root_size += e - old -> r_end;
        old -> r_end = e;
        return;
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets");
    }
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
      add_roots_to_index(GC_static_roots + n_root_sets);
#   endif
    GC_root_size += e - b;
    n_root_sets++;
}
Example #27
/* finalized when this finalizer is invoked.                    */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    GC_oom_func oom_fn;
    DCL_LOCK_STATE;

    LOCK();
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &log_fo_table_size);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1 << (unsigned)log_fo_table_size);
    }
    /* in the THREADS case we hold allocation lock.             */
    base = (ptr_t)obj;
    for (;;) {
      index = HASH2(base, log_fo_table_size);
      prev_fo = 0;
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for base. */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
            }
          }
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
        break;
      }
      if (fn == 0) {
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(base, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory.  *ocd and *ofn remain unchanged.  */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    UNLOCK();
}
Example #28
/* Tmp specifies that the interval may be deleted before                */
/* re-registering dynamic libraries.                                    */
void GC_add_roots_inner(ptr_t b, ptr_t e, GC_bool tmp)
{
    GC_ASSERT((word)b <= (word)e);
    b = (ptr_t)(((word)b + (sizeof(word) - 1)) & ~(word)(sizeof(word) - 1));
                                        /* round b up to word boundary */
    e = (ptr_t)((word)e & ~(word)(sizeof(word) - 1));
                                        /* round e down to word boundary */
    if ((word)b >= (word)e) return; /* nothing to do */

#   if defined(MSWIN32) || defined(MSWINCE) || defined(CYGWIN32)
      /* Spend the time to ensure that there are no overlapping */
      /* or adjacent intervals.                                 */
      /* This could be done faster with e.g. a                  */
      /* balanced tree.  But the execution time here is         */
      /* virtually guaranteed to be dominated by the time it    */
      /* takes to scan the roots.                               */
      {
        int i;
        struct roots * old = NULL; /* initialized to prevent warning. */

        for (i = 0; i < n_root_sets; i++) {
            old = GC_static_roots + i;
            if ((word)b <= (word)old->r_end
                 && (word)e >= (word)old->r_start) {
                if ((word)b < (word)old->r_start) {
                    GC_root_size += old->r_start - b;
                    old -> r_start = b;
                }
                if ((word)e > (word)old->r_end) {
                    GC_root_size += e - old->r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= tmp;
                break;
            }
        }
        if (i < n_root_sets) {
          /* merge other overlapping intervals */
            struct roots *other;

            for (i++; i < n_root_sets; i++) {
              other = GC_static_roots + i;
              b = other -> r_start;
              e = other -> r_end;
              if ((word)b <= (word)old->r_end
                  && (word)e >= (word)old->r_start) {
                if ((word)b < (word)old->r_start) {
                    GC_root_size += old->r_start - b;
                    old -> r_start = b;
                }
                if ((word)e > (word)old->r_end) {
                    GC_root_size += e - old->r_end;
                    old -> r_end = e;
                }
                old -> r_tmp &= other -> r_tmp;
                /* Delete this entry. */
                  GC_root_size -= (other -> r_end - other -> r_start);
                  other -> r_start = GC_static_roots[n_root_sets-1].r_start;
                  other -> r_end = GC_static_roots[n_root_sets-1].r_end;
                  n_root_sets--;
              }
            }
          return;
        }
      }
#   else
      {
        struct roots * old = (struct roots *)GC_roots_present(b);

        if (old != 0) {
          if ((word)e <= (word)old->r_end) {
            old -> r_tmp &= tmp;
            return; /* already there */
          }
          if (old -> r_tmp == tmp || !tmp) {
            /* Extend the existing root. */
            GC_root_size += e - old -> r_end;
            old -> r_end = e;
            old -> r_tmp = tmp;
            return;
          }
          b = old -> r_end;
        }
      }
#   endif
    if (n_root_sets == MAX_ROOT_SETS) {
        ABORT("Too many root sets");
    }

#   ifdef DEBUG_ADD_DEL_ROOTS
      GC_log_printf("Adding data root section %d: %p .. %p%s\n",
                    n_root_sets, (void *)b, (void *)e,
                    tmp ? " (temporary)" : "");
#   endif
    GC_static_roots[n_root_sets].r_start = (ptr_t)b;
    GC_static_roots[n_root_sets].r_end = (ptr_t)e;
    GC_static_roots[n_root_sets].r_tmp = tmp;
#   if !defined(MSWIN32) && !defined(MSWINCE) && !defined(CYGWIN32)
      GC_static_roots[n_root_sets].r_next = 0;
      add_roots_to_index(GC_static_roots + n_root_sets);
#   endif
    GC_root_size += e - b;
    n_root_sets++;
}
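
Clients reach this code through GC_add_roots, e.g. to register a custom-mapped data segment so that pointers stored in it keep objects alive:

#include <gc.h>

static char my_segment[65536];   /* stands in for e.g. an mmap'ed region */

static void register_segment(void)
{
    /* The interval is [low, high); both ends are rounded to word */
    /* boundaries by GC_add_roots_inner, as shown above.          */
    GC_add_roots(my_segment, my_segment + sizeof my_segment);
}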
Example #29
/*
 * This may be called from DllMain, and hence operates under unusual
 * constraints.
 */
static GC_thread GC_new_thread(void) {
  int i;
  /* It appears to be unsafe to acquire a lock here, since this	*/
  /* code is apparently not preemptible on some systems.	*/
  /* (This is based on complaints, not on Microsoft's official	*/
  /* documentation, which says this should perform "only simple	*/
  /* initialization tasks".)					*/
  /* Hence we make do with nonblocking synchronization.		*/

  /* The following should be a noop according to the win32	*/
  /* documentation.  There is empirical evidence that it	*/
  /* isn't.		- HB					*/
# if defined(MPROTECT_VDB)
   if (GC_incremental) SetUnhandledExceptionFilter(GC_write_fault_handler);
# endif
                /* cast away volatile qualifier */
  for (i = 0; InterlockedExchange((IE_t)&thread_table[i].in_use,1) != 0; i++) {
    /* Compare-and-swap would make this cleaner, but that's not 	*/
    /* supported before Windows 98 and NT 4.0.  In Windows 2000,	*/
    /* InterlockedExchange is supposed to be replaced by		*/
    /* InterlockedExchangePointer, but that's not really what I		*/
    /* want here.							*/
    if (i == MAX_THREADS - 1)
      ABORT("too many threads");
  }
  /* Update GC_max_thread_index if necessary.  The following is safe,	*/
  /* and unlike CompareExchange-based solutions seems to work on all	*/
  /* Windows95 and later platforms.					*/
  /* Unfortunately, GC_max_thread_index may be temporarily out of 	*/
  /* bounds, so readers have to compensate.				*/
  while (i > GC_max_thread_index) {
    InterlockedIncrement((IE_t)&GC_max_thread_index);
  }
  if (GC_max_thread_index >= MAX_THREADS) {
    /* We overshot due to simultaneous increments.	*/
    /* Setting it to MAX_THREADS-1 is always safe.	*/
    GC_max_thread_index = MAX_THREADS - 1;
  }
  
# ifdef CYGWIN32
    thread_table[i].pthread_id = pthread_self();
# endif
  if (!DuplicateHandle(GetCurrentProcess(),
	               GetCurrentThread(),
		       GetCurrentProcess(),
		       (HANDLE*)&thread_table[i].handle,
		       0,
		       0,
		       DUPLICATE_SAME_ACCESS)) {
	DWORD last_error = GetLastError();
	GC_printf1("Last error code: %lx\n", last_error);
	ABORT("DuplicateHandle failed");
  }
  thread_table[i].stack_base = GC_get_stack_base();
  /* Up until this point, GC_push_all_stacks considers this thread	*/
  /* invalid.								*/
  if (thread_table[i].stack_base == NULL) 
    ABORT("Failed to find stack base in GC_new_thread");
  /* Up until this point, this entry is viewed as reserved but invalid	*/
  /* by GC_delete_thread.						*/
  thread_table[i].id = GetCurrentThreadId();
  /* If this thread is being created while we are trying to stop	*/
  /* the world, wait here.  Hopefully this can't happen on any	*/
  /* systems that don't allow us to block here.			*/
  while (GC_please_stop) Sleep(20);
  GC_ASSERT(!thread_table[i].thread_blocked);
  return thread_table + i;
}
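
The InterlockedExchange loop is a lock-free slot claim: exchanging 1 into in_use returns the previous value, and whichever thread reads back 0 owns the slot. The same idiom with C11 atomics:

#include <stdatomic.h>

#define MAX_SLOTS 256
static atomic_int slot_in_use[MAX_SLOTS];

static int claim_slot(void)
{
  int i;

  for (i = 0; i < MAX_SLOTS; i++) {
    /* atomic_exchange returns the old value: 0 means we won the slot. */
    if (atomic_exchange(&slot_in_use[i], 1) == 0)
      return i;
  }
  return -1;                   /* table full */
}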
Example #30
static void pop_in_progress(ptr_t p)
{
  --n_in_progress;
  GC_ASSERT(in_progress_space[n_in_progress] == p);
}
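
pop_in_progress has a matching push; in the real back-graph code the array grows on demand. A self-contained sketch of the pair:

#include <assert.h>
#include <stdlib.h>

static char **in_progress_space;
static size_t in_progress_size, n_in_progress;

static void push_in_progress(char *p)
{
  if (n_in_progress == in_progress_size) {    /* grow on demand */
    in_progress_size = in_progress_size ? 2 * in_progress_size : 16;
    in_progress_space = realloc(in_progress_space,
                                in_progress_size * sizeof *in_progress_space);
    assert(in_progress_space != NULL);
  }
  in_progress_space[n_in_progress++] = p;
}

static void pop_in_progress(char *p)
{
  assert(n_in_progress > 0);
  --n_in_progress;
  assert(in_progress_space[n_in_progress] == p);
}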