Example 1
  /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * curr_fo, * next_fo;
    ptr_t real_ptr;
    int i;
    int fo_size;

    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_fnlz_roots.finalize_now = curr_fo;

          /* unhide object pointer so any future collections will       */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
        }
    }
    GC_fo_entries = 0;  /* all entries deleted from the hash table */
  }
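
For context, here is a minimal sketch of how the queue built above is typically drained, modeled on bdwgc's GC_finalize_all; the exact locking discipline shown is an assumption, not verbatim source.

/* Sketch: repeatedly enqueue and run pending finalizers until none    */
/* remain.  GC_invoke_finalizers() runs queued finalizers and is       */
/* assumed to be called without the allocation lock held.              */
void GC_finalize_all_sketch(void)
{
    LOCK();
    while (GC_fo_entries > 0) {
      GC_enqueue_all_finalizers();  /* move every fo entry to the queue */
      UNLOCK();
      GC_invoke_finalizers();       /* run the queued finalizers */
      LOCK();
    }
    UNLOCK();
}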
Example 2
GC_INNER GC_bool GC_check_leaked(ptr_t base)
{
  size_t i;
  size_t obj_sz;
  word *p;

  if (
#     if defined(KEEP_BACK_PTRS) || defined(MAKE_BACK_GRAPH)
        (*(word *)base & 1) != 0 &&
#     endif
      GC_has_other_debug_info(base) >= 0)
    return TRUE; /* object has leaked */

  /* Validate freed object's content. */
  p = (word *)(base + sizeof(oh));
  obj_sz = BYTES_TO_WORDS(HDR(base)->hb_sz - sizeof(oh));
  for (i = 0; i < obj_sz; ++i)
    if (p[i] != GC_FREED_MEM_MARKER) {
        GC_set_mark_bit(base); /* do not reclaim it in this cycle */
        GC_add_smashed((ptr_t)(&p[i])); /* alter-after-free detected */
        break; /* don't report any other smashed locations in the object */
    }

  return FALSE; /* GC_debug_free() has been called */
}
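
The comparison against GC_FREED_MEM_MARKER above only works because the debug free path first overwrites the freed object with that marker. A minimal sketch of that fill step, assuming the same internal dbg_mlc.c context (oh, HDR, BYTES_TO_WORDS); the helper name is hypothetical:

/* Hypothetical helper: fill a freed debug object with the marker so   */
/* that GC_check_leaked can later detect a write after free.           */
static void fill_freed_object(ptr_t base)
{
    word *p = (word *)(base + sizeof(oh));
    size_t sz = BYTES_TO_WORDS(HDR(base)->hb_sz - sizeof(oh));
    size_t i;

    for (i = 0; i < sz; ++i)
      p[i] = GC_FREED_MEM_MARKER;
}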
Example 3
GC_INNER void GC_traverse_back_graph(void)
{
  GC_ASSERT(I_HOLD_LOCK());
  GC_max_height = 0;
  GC_apply_to_each_object(update_max_height);
  if (0 != GC_deepest_obj)
    GC_set_mark_bit(GC_deepest_obj);  /* Keep it until we can print it. */
}
Example 4
STATIC void GC_add_leaked(ptr_t leaked)
{
    if (GC_n_leaked < MAX_LEAKED) {
      GC_have_errors = TRUE;
      GC_leaked[GC_n_leaked++] = leaked;
      /* Make sure it's not reclaimed this cycle */
      GC_set_mark_bit(leaked);
    }
}
Example 5
  static void push_and_mark_object(void *p)
  {
    GC_normal_finalize_mark_proc(p);
    while (!GC_mark_stack_empty()) {
      MARK_FROM_MARK_STACK();
    }
    GC_set_mark_bit(p);
    if (GC_mark_state != MS_NONE) {
      while (!GC_mark_some(0)) {
        /* Empty. */
      }
    }
  }
Example 6
GC_INLINE void GC_add_leaked(ptr_t leaked)
{
#  ifndef SHORT_DBG_HDRS
     if (GC_findleak_delay_free && !GC_check_leaked(leaked))
       return;
#  endif

    GC_have_errors = TRUE;
    if (GC_n_leaked < MAX_LEAKED) {
      GC_leaked[GC_n_leaked++] = leaked;
      /* Make sure it's not reclaimed this cycle */
      GC_set_mark_bit(leaked);
    }
}
Example 7
GC_INLINE void GC_add_leaked(ptr_t leaked)
{
#  ifndef SHORT_DBG_HDRS
    if (GC_findleak_delay_free && !GC_check_leaked(leaked))
        return;
#  endif

    GC_have_errors = TRUE;
    /* FIXME: Prevent adding an object while printing leaked ones.      */
    if (GC_n_leaked < MAX_LEAKED) {
        GC_leaked[GC_n_leaked++] = leaked;
        /* Make sure it's not reclaimed this cycle */
        GC_set_mark_bit(leaked);
    }
}
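
A short usage sketch of the find-leak mode these helpers serve; GC_set_find_leak, GC_INIT, GC_MALLOC and GC_gcollect are the public bdwgc API, and the deliberately lost allocation is illustrative:

#include "gc.h"

int main(void)
{
    GC_set_find_leak(1);   /* act as a leak detector rather than a GC */
    GC_INIT();
    GC_MALLOC(47);         /* pointer dropped on purpose */
    GC_gcollect();         /* unreachable object reported via GC_add_leaked */
    return 0;
}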
Example 8
  STATIC void GC_mark_togglerefs(void)
  {
    int i;
    if (NULL == GC_toggleref_arr)
      return;

    /* TODO: Hide GC_toggleref_arr to avoid its marking from roots. */
    GC_set_mark_bit(GC_toggleref_arr);
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      void *obj = GC_toggleref_arr[i].strong_ref;
      if (obj != NULL && ((word)obj & 1) == 0) {
        push_and_mark_object(obj);
      }
    }
  }
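
For reference, a sketch of the public toggleref API that populates GC_toggleref_arr; GC_set_toggleref_func and GC_toggleref_add are declared in gc.h, while the keep-everything-strong policy here is purely illustrative:

#include "gc.h"

/* Illustrative callback: the collector asks what to do with each      */
/* registered toggleref (drop it, keep it strong, or make it weak).    */
static GC_ToggleRefStatus GC_CALLBACK keep_strong(void *obj)
{
    (void)obj;
    return GC_TOGGLE_REF_STRONG;
}

void toggleref_example(void *obj)
{
    GC_set_toggleref_func(keep_strong);
    GC_toggleref_add(obj, 1 /* is_strong_ref */);
}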
Example 9
/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    int i;
    int fo_size;
    
    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
      while (curr_fo != 0) {
          real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);
 
          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
              fo_head[i] = next_fo;
          } else {
              fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;

          /* unhide object pointer so any future collections will      */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)REVEAL_POINTER(curr_fo -> fo_hidden_base);

          GC_bytes_finalized +=
                curr_fo -> fo_object_size
                + sizeof(struct finalizable_object);
          curr_fo = next_fo;
        }
    }

    return;
}
Example 10
/* Called with world stopped.  Cause disappearing links to disappear,  */
/* and invoke finalizers.                                               */
void GC_finalize(void)
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    
  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers       */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still               */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_finalize_now;
        prev_fo = 0;
        while (curr_fo != 0) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (prev_fo == 0)
                GC_finalize_now = next_fo;
              else
                fo_set_next(prev_fo, next_fo);

              curr_fo -> fo_hidden_base =
                        (word)HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, fo_head[i]);
              GC_fo_entries++;
              fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}
Example 11
/*
 * Call the mark routines (GC_push_one for a single pointer, GC_push_conditional
 * on groups of pointers) on every top level accessible pointer.
 * If all is FALSE, arrange to push only possibly altered values.
 * Cold_gc_frame is an address inside a GC frame that
 * remains valid until all marking is complete.
 * A zero value indicates that it's OK to miss some
 * register values.
 */
GC_INNER void GC_push_roots(GC_bool all, ptr_t cold_gc_frame)
{
    int i;
    unsigned kind;

    /*
     * Next push static data.  This must happen early on, since it's
     * not robust against mark stack overflow.
     */
     /* Re-register dynamic libraries, in case one got added.           */
     /* There is some argument for doing this as late as possible,      */
     /* especially on win32, where it can change asynchronously.        */
     /* In those cases, we do it here.  But on other platforms, it's    */
     /* not safe with the world stopped, so we do it earlier.           */
#      if !defined(REGISTER_LIBRARIES_EARLY)
         GC_cond_register_dynamic_libraries();
#      endif

     /* Mark everything in static data areas                             */
       for (i = 0; i < n_root_sets; i++) {
         GC_push_conditional_with_exclusions(
                             GC_static_roots[i].r_start,
                             GC_static_roots[i].r_end, all);
       }

     /* Mark all free list header blocks, if those were allocated from  */
     /* the garbage collected heap.  This makes sure they don't         */
     /* disappear if we are not marking from static data.  It also      */
     /* saves us the trouble of scanning them, and possibly that of     */
     /* marking the freelists.                                          */
       for (kind = 0; kind < GC_n_kinds; kind++) {
         void *base = GC_base(GC_obj_kinds[kind].ok_freelist);
         if (0 != base) {
           GC_set_mark_bit(base);
         }
       }

     /* Mark from GC internal roots if those might otherwise have       */
     /* been excluded.                                                  */
       if (GC_no_dls || roots_were_cleared) {
           GC_push_gc_structures();
       }

     /* Mark thread local free lists, even if their mark        */
     /* descriptor excludes the link field.                     */
     /* If the world is not stopped, this is unsafe.  It is     */
     /* also unnecessary, since we will do this again with the  */
     /* world stopped.                                          */
#      if defined(THREAD_LOCAL_ALLOC)
         if (GC_world_stopped) GC_mark_thread_local_free_lists();
#      endif

    /*
     * Now traverse stacks, and mark from register contents.
     * These must be done last, since they can legitimately overflow
     * the mark stack.
     * This is usually done by saving the current context on the
     * stack, and then just tracing from the stack.
     */
      GC_push_regs_and_stack(cold_gc_frame);

    if (GC_push_other_roots != 0) (*GC_push_other_roots)();
        /* In the threads case, this also pushes thread stacks. */
        /* Note that without interior pointer recognition lots  */
        /* of stuff may have been pushed already, and this      */
        /* should be careful about mark stack overflows.        */
}
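
The "static data areas" pushed above are the registered root sets. A minimal sketch of adding one through the public API; GC_add_roots is standard bdwgc, and the buffer is illustrative:

#include "gc.h"

static char my_roots[4096];  /* illustrative region to be scanned as a root */

void register_my_roots(void)
{
    /* Registers [my_roots, my_roots + sizeof(my_roots)) as a root set; */
    /* GC_push_roots will then scan it on every collection.             */
    GC_add_roots(my_roots, my_roots + sizeof(my_roots));
}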
Example 12
/* Cause disappearing links to disappear and unreachable objects to be  */
/* enqueued for finalization.                                           */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_fnlz_roots.finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }

  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
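
The fo entries this function walks are created through the public registration API. A short sketch; GC_REGISTER_FINALIZER and GC_finalization_proc are standard gc.h, and the finalizer body is illustrative:

#include <stdio.h>
#include "gc.h"

static void GC_CALLBACK my_finalizer(void *obj, void *client_data)
{
    (void)obj;
    printf("finalizing: %s\n", (const char *)client_data);
}

void *make_finalizable(void)
{
    void *obj = GC_MALLOC(64);
    /* When obj becomes unreachable, GC_finalize moves its entry onto   */
    /* the finalize_now queue and my_finalizer eventually runs.         */
    GC_REGISTER_FINALIZER(obj, my_finalizer, "example", NULL, NULL);
    return obj;
}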
Example 13
/* Called with world stopped.  Cause disappearing links to disappear,  */
/* and invoke finalizers.                                               */
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    /* PLTSCHEME: for resetting the disappearing link */

    /* PLTSCHEME: it's important to "push roots again" before
       making disappearing links disappear, because this
       step includes marking from ephemerons whose keys are
       reachable. We want to mark before disappearing links
       are disappeared. */
    if (GC_push_last_roots_again) GC_push_last_roots_again();

  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        /* PLTSCHEME: skip late dls: */
        if (curr_dl->dl_kind == LATE_DL) {
          prev_dl = curr_dl;
          curr_dl = dl_next(curr_dl);
          continue;
        }
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }

  /* PLTSCHEME: All eagers first */
  /* Enqueue for finalization all EAGER objects that are still         */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    finalize_eagers(1);
    if (GC_push_last_roots_again) GC_push_last_roots_again();
    finalize_eagers(2);
    if (GC_push_last_roots_again) GC_push_last_roots_again();

  /* Mark all objects reachable via chains of 1 or more pointers       */
  /* from finalizable objects.                                          */
  /* PLTSCHEME: non-eager finalizations only (eagers already marked) */
#   ifdef PRINTSTATS
    GC_ASSERT(GC_mark_state == MS_NONE);
#   endif
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        if (!(curr_fo -> eager_level)) { /* PLTSCHEME */
          real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
          if (!GC_is_marked(real_ptr)) {
            (*(curr_fo -> fo_mark_proc))(real_ptr);
            while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK();
            if (GC_mark_state != MS_NONE) {
              /* Mark stack overflowed.  Very unlikely. */
#             ifdef PRINTSTATS
                if (GC_mark_state != MS_INVALID) ABORT("Bad mark state");
                GC_printf("Mark stack overflowed in finalization!!\n");
#             endif
              /* Make mark bits consistent again.  Forget about        */
              /* finalizing this object for now.                       */
              GC_set_mark_bit(real_ptr);
              while (!GC_mark_some((ptr_t)0)) {
                /* Empty. */
              }
            }
#if 0
            if (GC_is_marked(real_ptr)) {
              /* PLTSCHEME: we have some ok cycles (below a parent) */
              printf("Finalization cycle involving %lx\n", real_ptr);
            }
#endif
          }
        }
      }
    }
  /* Enqueue for finalization all objects that are still               */
  /* unreachable.                                                       */
    /* GC_bytes_finalized = 0; */ /* PLTSCHEME: done above */
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_set_mark_bit(real_ptr);

            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

    /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }

    /* PLTSCHEME: late disappearing links */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        /* PLTSCHEME: only late dls: */
        if (curr_dl->dl_kind != LATE_DL) {
          prev_dl = curr_dl;
          curr_dl = dl_next(curr_dl);
          continue;
        }
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }

    /* PLTSCHEME: */
    if (GC_custom_finalize)
      GC_custom_finalize();
}
Example 14
/* PLTSCHEME: eager finalization: */
static void finalize_eagers(int eager_level)
{
  struct finalizable_object * curr_fo, * prev_fo, * next_fo;
  struct finalizable_object * end_eager_mark;
  ptr_t real_ptr;
  int i;
  int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

  end_eager_mark = GC_finalize_now; /* PLTSCHEME */
  for (i = 0; i < fo_size; i++) {
    curr_fo = fo_head[i];
    prev_fo = 0;
    while (curr_fo != 0) {
      if (curr_fo -> eager_level == eager_level) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
          /* We assume that (non-eager) finalization orders do not      */
          /* need to take into account connections through memory       */
          /* with eager finalizations.  Otherwise, this mark bit        */
          /* could break the chain from one (non-eager) finalizable     */
          /* object to another.                                         */
          GC_set_mark_bit(real_ptr);

          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
            fo_head[i] = next_fo;
          } else {
            fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;
          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;
          /* unhide object pointer so any future collections will      */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
            (word)REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
            curr_fo -> fo_object_size
              + sizeof(struct finalizable_object);
#         ifdef PRINTSTATS
            if (!GC_is_marked((ptr_t)curr_fo)) {
              ABORT("GC_finalize: found accessible unmarked object\n");
            }
#         endif
          curr_fo = next_fo;
        } else {
          prev_fo = curr_fo;
          curr_fo = fo_next(curr_fo);
        }
      } else {
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
    }
  }
  
  /* PLTSCHEME: Mark from queued eagers: */
  for (curr_fo = GC_finalize_now; curr_fo != end_eager_mark;
       curr_fo = fo_next(curr_fo)) {
    /* PLTSCHEME: if this is an eager finalization, then objects
       accessible from real_ptr need to be marked */
    if (curr_fo -> eager_level == eager_level) {
      (*(curr_fo -> fo_mark_proc))(curr_fo -> fo_hidden_base);
      while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK();
      if (GC_mark_state != MS_NONE) {
        /* Mark stack overflowed.  Very unlikely.  Everything is OK,   */
        /* though.  Just mark from scratch.                            */
        while (!GC_mark_some((ptr_t)0)) {
          /* Empty. */
        }
      }
    }
  }
}