Example #1
  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo = GC_finalize_now;
    unsigned long ready = 0;

#   ifndef GC_LONG_REFS_NOT_NEEDED
    GC_log_printf(
        "%lu finalization table entries; "
        "%lu short/%lu long disappearing links alive\n",
        (unsigned long)GC_fo_entries,
        (unsigned long)GC_dl_hashtbl.entries,
        (unsigned long)GC_ll_hashtbl.entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_log_printf("%lu objects are ready for finalization; "
                  "%ld short/%ld long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)GC_old_ll_entries - (long)GC_ll_hashtbl.entries);
#   else /* GC_LONG_REFS_NOT_NEEDED */
    GC_log_printf(
        "%lu finalization table entries; "
        "%lu disappearing links alive\n",
        (unsigned long)GC_fo_entries,
        (unsigned long)GC_dl_hashtbl.entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_log_printf("%lu objects are ready for finalization; "
                  "%ld links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries);
#   endif /* !GC_LONG_REFS_NOT_NEEDED */
  }
Example #2
/* Should be called without allocation lock.				*/
int GC_invoke_finalizers()
{
    static int doing = 0; /* PLTSCHEME */
    struct finalizable_object * curr_fo;
    int count = 0;
    word bytes_freed_before = 0;
    DCL_LOCK_STATE;

    /* PLTSCHEME: don't allow nested finalizations */
    if (doing)
      return 0;
    doing++;
    
    while (GC_finalize_now != 0) {
#	ifdef THREADS
	    LOCK();
#	endif
	if (count == 0) {
	    bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
	}
    	curr_fo = GC_finalize_now;
#	ifdef THREADS
 	    if (curr_fo != 0) GC_finalize_now = fo_next(curr_fo);
	    UNLOCK();
	    if (curr_fo == 0) break;
#	else
	    GC_finalize_now = fo_next(curr_fo);
#	endif
 	fo_set_next(curr_fo, 0);
    	(*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
    			      curr_fo -> fo_client_data);
    	curr_fo -> fo_client_data = 0;
	++count;
#	ifdef UNDEFINED
	    /* This is probably a bad idea.  It throws off accounting if */
	    /* nearly all objects are finalizable.  O.w. it shouldn't	 */
	    /* matter.							 */
    	    GC_free((void *)curr_fo);
#	endif
    }

    doing--; /* PLTSCHEME */

    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0 && bytes_freed_before != GC_bytes_freed) {
        LOCK();
	GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
	UNLOCK();
    }
    return count;
}
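For context, a minimal client-side sketch (not taken from any of the listed projects) of how this queue is normally driven through the public API; it assumes a standard Boehm GC installation exposing <gc.h>, GC_INIT, GC_MALLOC, GC_set_finalize_on_demand, GC_register_finalizer, GC_gcollect and GC_invoke_finalizers:

#include <stdio.h>
#include <gc.h>                     /* may be <gc/gc.h> on some installs */

/* Matches GC_finalization_proc: (object, client_data). */
static void report_freed(void *obj, void *client_data)
{
    printf("finalizing %p (%s)\n", obj, (const char *)client_data);
}

int main(void)
{
    void *p;

    GC_INIT();
    GC_set_finalize_on_demand(1);   /* only queue finalizers; the client runs them */

    p = GC_MALLOC(64);
    GC_register_finalizer(p, report_freed, "demo object", NULL, NULL);

    p = NULL;                       /* drop the last reference */
    GC_gcollect();                  /* GC_finalize() moves the object onto the ready queue */
    printf("%d finalizer(s) run\n", GC_invoke_finalizers());
    return 0;
}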
Example #3
void GC_dump_finalization(void)
{
    struct disappearing_link * curr_dl;
    struct finalizable_object * curr_fo;
    ptr_t real_ptr, real_link;
    int dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    int i;

    GC_printf("Disappearing links:\n");
    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_head[i]; curr_dl != 0; curr_dl = dl_next(curr_dl)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf("Object: %p, Link:%p\n", real_ptr, real_link);
      }
    }
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf("Finalizable object: %p\n", real_ptr);
      }
    }
}
Example #4
GC_INNER int GC_clone_finalizer(void * src_p, void * dest_p)
{
    struct finalizable_object * curr_fo;
    size_t index;
    ptr_t base, result;
    GC_finalization_proc fn;
    ptr_t cd;
    finalization_mark_proc mp;
    DCL_LOCK_STATE;

    base = (ptr_t)src_p;
    result = (ptr_t)dest_p;
    LOCK();
    index = HASH2(base, log_fo_table_size);
    curr_fo = GC_fo_head[index];
    while (curr_fo != 0) {
      GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
      if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          fn = curr_fo -> fo_fn;
          cd = curr_fo -> fo_client_data;
          mp = curr_fo -> fo_mark_proc;
          UNLOCK();
          return GC_register_finalizer_inner(result, fn, cd, 0, 0, mp);
      }
      curr_fo = fo_next(curr_fo);
    }
    UNLOCK();
    return TRUE;
}
Example #5
  /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * curr_fo, * next_fo;
    ptr_t real_ptr;
    int i;
    int fo_size;

    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_fnlz_roots.finalize_now = curr_fo;

          /* unhide object pointer so any future collections will       */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
        }
    }
    GC_fo_entries = 0;  /* all entries deleted from the hash table */
  }
Example #6
void GC_print_finalization_stats(void)
{
    struct finalizable_object *fo = GC_finalize_now;
    size_t ready = 0;

    GC_printf("%u finalization table entries; %u disappearing links\n",
	       GC_fo_entries, GC_dl_entries);
    for (; 0 != fo; fo = fo_next(fo)) ++ready;
    GC_printf("%u objects are eligible for immediate finalization\n", ready);
}
Example #7
/* Should be called without allocation lock.                            */
GC_API int GC_CALL GC_invoke_finalizers(void)
{
    struct finalizable_object * curr_fo;
    int count = 0;
    word bytes_freed_before = 0; /* initialized to prevent warning. */
    DCL_LOCK_STATE;

    while (GC_fnlz_roots.finalize_now != NULL) {
#       ifdef THREADS
        LOCK();
#       endif
        if (count == 0) {
            bytes_freed_before = GC_bytes_freed;
            /* Don't do this outside, since we need the lock. */
        }
        curr_fo = GC_fnlz_roots.finalize_now;
#       ifdef THREADS
        if (curr_fo != 0) GC_fnlz_roots.finalize_now = fo_next(curr_fo);
        UNLOCK();
        if (curr_fo == 0) break;
#       else
        GC_fnlz_roots.finalize_now = fo_next(curr_fo);
#       endif
        fo_set_next(curr_fo, 0);
        (*(curr_fo -> fo_fn))((ptr_t)(curr_fo -> fo_hidden_base),
                              curr_fo -> fo_client_data);
        curr_fo -> fo_client_data = 0;
        ++count;
#       ifdef UNDEFINED
        /* This is probably a bad idea.  It throws off accounting if */
        /* nearly all objects are finalizable.  O.w. it shouldn't    */
        /* matter.                                                   */
        GC_free((void *)curr_fo);
#       endif
    }
    /* bytes_freed_before is initialized whenever count != 0 */
    if (count != 0 && bytes_freed_before != GC_bytes_freed) {
        LOCK();
        GC_finalizer_bytes_freed += (GC_bytes_freed - bytes_freed_before);
        UNLOCK();
    }
    return count;
}
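A common way to consume the ready queue from an application is to poll at a safe point and then invoke; a small sketch assuming the public GC_should_invoke_finalizers() and GC_invoke_finalizers() entry points from <gc.h>:

#include <gc.h>

/* Called at a safe point, e.g. once per event-loop iteration. */
static void run_pending_finalizers(void)
{
    /* Non-zero exactly when the ready queue (GC_fnlz_roots.finalize_now
     * above) is non-empty, so the call below has work to do. */
    if (GC_should_invoke_finalizers())
        (void)GC_invoke_finalizers();
}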
Example #8
/* Enqueue all remaining finalizers to be run - Assumes lock is
 * held, and signals are disabled */
void GC_enqueue_all_finalizers(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    int i;
    int fo_size;
    
    fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = fo_head[i];
        prev_fo = 0;
      while (curr_fo != 0) {
          real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);
 
          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
              fo_head[i] = next_fo;
          } else {
              fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;

          /* Add to list of objects awaiting finalization.	*/
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;

          /* unhide object pointer so any future collections will	*/
          /* see it.						*/
          curr_fo -> fo_hidden_base = 
        		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);

          GC_bytes_finalized +=
           	curr_fo -> fo_object_size
        		+ sizeof(struct finalizable_object);
          curr_fo = next_fo;
        }
    }

    return;
}
Example #9
  GC_INNER void GC_print_finalization_stats(void)
  {
    struct finalizable_object *fo;
    unsigned long ready = 0;

    GC_log_printf("%lu finalization entries;"
                  " %lu/%lu short/long disappearing links alive\n",
                  (unsigned long)GC_fo_entries,
                  (unsigned long)GC_dl_hashtbl.entries,
                  (unsigned long)IF_LONG_REFS_PRESENT_ELSE(
                                                GC_ll_hashtbl.entries, 0));

    for (fo = GC_fnlz_roots.finalize_now; fo != NULL; fo = fo_next(fo))
      ++ready;
    GC_log_printf("%lu finalization-ready objects;"
                  " %ld/%ld short/long links cleared\n",
                  ready,
                  (long)GC_old_dl_entries - (long)GC_dl_hashtbl.entries,
                  (long)IF_LONG_REFS_PRESENT_ELSE(
                              GC_old_ll_entries - GC_ll_hashtbl.entries, 0));
  }
Example #10
File: finalize.c  Project: 8l/lllm
  void GC_dump_finalization(void)
  {
    struct finalizable_object * curr_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    ptr_t real_ptr;
    size_t i;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
#   endif
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fo_head[i]; curr_fo != 0;
           curr_fo = fo_next(curr_fo)) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf("Finalizable object: %p\n", real_ptr);
      }
    }
  }
Example #11
/* and invoke finalizers.						*/
void GC_finalize(void)
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    
  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
  /* Mark all objects reachable via chains of 1 or more pointers	*/
  /* from finalizable objects.						*/
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
	    GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %lx\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still		*/
  /* unreachable.							*/
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
	    if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
	    }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.	*/
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will	*/
              /* see it.						*/
              curr_fo -> fo_hidden_base = 
              		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                 	curr_fo -> fo_object_size
              		+ sizeof(struct finalizable_object);
	    GC_ASSERT(GC_is_marked(GC_base((ptr_t)curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_finalize_now; 
  	 curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
  	real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
  	if (!GC_is_marked(real_ptr)) {
  	    if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
  	        GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
  	    }
	    if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
		GC_set_mark_bit(real_ptr);
	    }
  	}
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_finalize_now;
        prev_fo = 0;
        while (curr_fo != 0) {
	  next_fo = fo_next(curr_fo);
	  if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
	    real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
	    if (!GC_is_marked(real_ptr)) {
	      GC_set_mark_bit(real_ptr);
	    } else {
	      if (prev_fo == 0)
		GC_finalize_now = next_fo;
	      else
		fo_set_next(prev_fo, next_fo);

              curr_fo -> fo_hidden_base =
              		(word) HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                 	curr_fo -> fo_object_size + sizeof(struct finalizable_object);

	      i = HASH2(real_ptr, log_fo_table_size);
	      fo_set_next (curr_fo, fo_head[i]);
	      GC_fo_entries++;
	      fo_head[i] = curr_fo;
	      curr_fo = prev_fo;
	    }
	  }
	  prev_fo = curr_fo;
	  curr_fo = next_fo;
        }
      }
  }

  /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }
}
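The "make disappearing links disappear" pass above is what backs weak references in client code. A hedged sketch of the usual pattern, assuming a gc.h that exposes GC_HIDE_POINTER/GC_REVEAL_POINTER (older headers require defining GC_I_HIDE_POINTERS to get HIDE_POINTER/REVEAL_POINTER) and GC_general_register_disappearing_link:

#include <stdio.h>
#include <gc.h>

int main(void)
{
    static GC_word weak_slot;   /* holds a hidden pointer, so it is not a
                                 * strong reference by itself */
    void *target;

    GC_INIT();
    target = GC_MALLOC(32);
    weak_slot = GC_HIDE_POINTER(target);
    /* Ask the collector to zero weak_slot once target becomes unreachable. */
    GC_general_register_disappearing_link((void **)&weak_slot, target);

    target = NULL;              /* drop the only strong reference */
    GC_gcollect();
    /* Threaded programs should do this test under GC_call_with_alloc_lock. */
    if (weak_slot == 0)
        printf("link cleared: object was collected\n");
    else
        printf("object still live at %p\n", GC_REVEAL_POINTER(weak_slot));
    return 0;
}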
Example #12
/* finalized when this finalizer is invoked.			*/
GC_API void GC_register_finalizer_inner(void * obj,
					GC_finalization_proc fn, void *cd,
					GC_finalization_proc *ofn, void **ocd,
					finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo;
    hdr *hhdr;
    DCL_LOCK_STATE;

#   ifdef THREADS
	LOCK();
#   endif
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
    	GC_grow_table((struct hash_chain_entry ***)(&fo_head),
    		      &log_fo_table_size);
	if (GC_print_stats) {
	    GC_log_printf("Grew fo table to %u entries\n",
	    	          (1 << log_fo_table_size));
	}
    }
    /* in the THREADS case signals are disabled and we hold allocation	*/
    /* lock; otherwise neither is true.  Proceed carefully.		*/
    base = (ptr_t)obj;
    index = HASH2(base, log_fo_table_size);
    prev_fo = 0; curr_fo = fo_head[index];
    while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == HIDE_POINTER(base)) {
            /* Interruption by a signal in the middle of this	*/
            /* should be safe.  The client may see only *ocd	*/
            /* updated, but we'll declare that to be his	*/
            /* problem.						*/
            if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
            if (ofn) *ofn = curr_fo -> fo_fn;
            /* Delete the structure for base. */
                if (prev_fo == 0) {
                  fo_head[index] = fo_next(curr_fo);
                } else {
                  fo_set_next(prev_fo, fo_next(curr_fo));
                }
            if (fn == 0) {
                GC_fo_entries--;
                  /* May not happen if we get a signal.  But a high	*/
                  /* estimate will only make the table larger than	*/
                  /* necessary.						*/
#		if !defined(THREADS) && !defined(DBG_HDRS_ALL)
                  GC_free((void *)curr_fo);
#		endif
            } else {
                curr_fo -> fo_fn = fn;
                curr_fo -> fo_client_data = (ptr_t)cd;
                curr_fo -> fo_mark_proc = mp;
		/* Reinsert it.  We deleted it first to maintain	*/
		/* consistency in the event of a signal.		*/
		if (prev_fo == 0) {
                  fo_head[index] = curr_fo;
                } else {
                  fo_set_next(prev_fo, curr_fo);
                }
            }
#	    ifdef THREADS
                UNLOCK();
#	    endif
            return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
    }
    if (ofn) *ofn = 0;
    if (ocd) *ocd = 0;
    if (fn == 0) {
#	ifdef THREADS
            UNLOCK();
#	endif
        return;
    }
    GET_HDR(base, hhdr);
    if (0 == hhdr) {
      /* We won't collect it, hence finalizer wouldn't be run. */
#     ifdef THREADS
          UNLOCK();
#     endif
      return;
    }
    new_fo = (struct finalizable_object *)
    	GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
    if (EXPECT(0 == new_fo, FALSE)) {
#     ifdef THREADS
	UNLOCK();
#     endif
      new_fo = (struct finalizable_object *)
	      GC_oom_fn(sizeof(struct finalizable_object));
      if (0 == new_fo) {
	GC_finalization_failures++;
	return;
      }
      /* It's not likely we'll make it here, but ... */
#     ifdef THREADS
	LOCK();
#     endif
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    new_fo -> fo_hidden_base = (word)HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, fo_head[index]);
    GC_fo_entries++;
    fo_head[index] = new_fo;
#   ifdef THREADS
        UNLOCK();
#   endif
}
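The fn == 0 and re-registration branches above define the public GC_register_finalizer contract: the previous fn/cd pair is handed back through ofn/ocd, and passing fn == 0 unregisters the object. A short sketch of that behavior from the client side (standard API; the client-data values are illustrative only):

#include <stdio.h>
#include <gc.h>

static void noop_fin(void *obj, void *cd) { (void)obj; (void)cd; }

int main(void)
{
    GC_finalization_proc old_fn;
    void *old_cd;
    void *p;

    GC_INIT();
    p = GC_MALLOC(16);

    /* First registration: no previous entry, so *ofn and *ocd are zeroed. */
    GC_register_finalizer(p, noop_fin, (void *)1, &old_fn, &old_cd);
    printf("previous entry: %s\n", old_fn ? "present" : "none");

    /* Re-registering the same object replaces the entry in place and hands
     * back the previous fn/cd, as the hash-chain walk above shows. */
    GC_register_finalizer(p, noop_fin, (void *)2, &old_fn, &old_cd);
    printf("previous client data: %p\n", old_cd);   /* (void *)1 */

    /* fn == 0 deletes the entry (GC_fo_entries is decremented). */
    GC_register_finalizer(p, 0, 0, &old_fn, &old_cd);
    printf("previous client data: %p\n", old_cd);   /* (void *)2 */
    return 0;
}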
Example #13
/* enqueued for finalization.                                           */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_fnlz_roots.finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }

  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
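The mark procs referenced in this version correspond to the public registration flavors; a brief sketch of how client code selects them (the mapping to the normal, GC_null_ and GC_unreachable_finalize_mark_proc routines follows the standard finalize.c and is stated here as an assumption about this particular tree):

#include <gc.h>

static void fin(void *obj, void *cd) { (void)obj; (void)cd; }

static void register_flavors(void *p)
{
    /* Each call below replaces the previous registration for p; they are
     * shown together only to list the flavors side by side. */

    /* Topologically ordered finalization (normal mark proc). */
    GC_register_finalizer(p, fin, NULL, NULL, NULL);
    /* Java-style unordered finalization (GC_null_finalize_mark_proc);
     * objects in cycles are still enqueued. */
    GC_register_finalizer_no_order(p, fin, NULL, NULL, NULL);
    /* Run only when unreachable even from other finalizable objects
     * (GC_unreachable_finalize_mark_proc, the
     * need_unreachable_finalization path above). */
    GC_register_finalizer_unreachable(p, fin, NULL, NULL, NULL);
}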
Example #14
/* finalized when this finalizer is invoked.                    */
STATIC void GC_register_finalizer_inner(void * obj,
                                        GC_finalization_proc fn, void *cd,
                                        GC_finalization_proc *ofn, void **ocd,
                                        finalization_mark_proc mp)
{
    ptr_t base;
    struct finalizable_object * curr_fo, * prev_fo;
    size_t index;
    struct finalizable_object *new_fo = 0;
    hdr *hhdr = NULL; /* initialized to prevent warning. */
    GC_oom_func oom_fn;
    DCL_LOCK_STATE;

    LOCK();
    if (log_fo_table_size == -1
        || GC_fo_entries > ((word)1 << log_fo_table_size)) {
        GC_grow_table((struct hash_chain_entry ***)&GC_fnlz_roots.fo_head,
                      &log_fo_table_size);
        GC_COND_LOG_PRINTF("Grew fo table to %u entries\n",
                           1 << (unsigned)log_fo_table_size);
    }
    /* in the THREADS case we hold allocation lock.             */
    base = (ptr_t)obj;
    for (;;) {
      index = HASH2(base, log_fo_table_size);
      prev_fo = 0;
      curr_fo = GC_fnlz_roots.fo_head[index];
      while (curr_fo != 0) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        if (curr_fo -> fo_hidden_base == GC_HIDE_POINTER(base)) {
          /* Interruption by a signal in the middle of this     */
          /* should be safe.  The client may see only *ocd      */
          /* updated, but we'll declare that to be his problem. */
          if (ocd) *ocd = (void *) (curr_fo -> fo_client_data);
          if (ofn) *ofn = curr_fo -> fo_fn;
          /* Delete the structure for base. */
          if (prev_fo == 0) {
            GC_fnlz_roots.fo_head[index] = fo_next(curr_fo);
          } else {
            fo_set_next(prev_fo, fo_next(curr_fo));
          }
          if (fn == 0) {
            GC_fo_entries--;
            /* May not happen if we get a signal.  But a high   */
            /* estimate will only make the table larger than    */
            /* necessary.                                       */
#           if !defined(THREADS) && !defined(DBG_HDRS_ALL)
              GC_free((void *)curr_fo);
#           endif
          } else {
            curr_fo -> fo_fn = fn;
            curr_fo -> fo_client_data = (ptr_t)cd;
            curr_fo -> fo_mark_proc = mp;
            /* Reinsert it.  We deleted it first to maintain    */
            /* consistency in the event of a signal.            */
            if (prev_fo == 0) {
              GC_fnlz_roots.fo_head[index] = curr_fo;
            } else {
              fo_set_next(prev_fo, curr_fo);
            }
          }
          UNLOCK();
#         ifndef DBG_HDRS_ALL
            if (EXPECT(new_fo != 0, FALSE)) {
              /* Free unused new_fo returned by GC_oom_fn() */
              GC_free((void *)new_fo);
            }
#         endif
          return;
        }
        prev_fo = curr_fo;
        curr_fo = fo_next(curr_fo);
      }
      if (EXPECT(new_fo != 0, FALSE)) {
        /* new_fo is returned by GC_oom_fn(), so fn != 0 and hhdr != 0. */
        break;
      }
      if (fn == 0) {
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      GET_HDR(base, hhdr);
      if (EXPECT(0 == hhdr, FALSE)) {
        /* We won't collect it, hence finalizer wouldn't be run. */
        if (ocd) *ocd = 0;
        if (ofn) *ofn = 0;
        UNLOCK();
        return;
      }
      new_fo = (struct finalizable_object *)
        GC_INTERNAL_MALLOC(sizeof(struct finalizable_object),NORMAL);
      if (EXPECT(new_fo != 0, TRUE))
        break;
      oom_fn = GC_oom_fn;
      UNLOCK();
      new_fo = (struct finalizable_object *)
                (*oom_fn)(sizeof(struct finalizable_object));
      if (0 == new_fo) {
        /* Not enough memory.  *ocd and *ofn remain unchanged.  */
        return;
      }
      /* It's not likely we'll make it here, but ... */
      LOCK();
      /* Recalculate index since the table may grow and         */
      /* check again that our finalizer is not in the table.    */
    }
    GC_ASSERT(GC_size(new_fo) >= sizeof(struct finalizable_object));
    if (ocd) *ocd = 0;
    if (ofn) *ofn = 0;
    new_fo -> fo_hidden_base = GC_HIDE_POINTER(base);
    new_fo -> fo_fn = fn;
    new_fo -> fo_client_data = (ptr_t)cd;
    new_fo -> fo_object_size = hhdr -> hb_sz;
    new_fo -> fo_mark_proc = mp;
    fo_set_next(new_fo, GC_fnlz_roots.fo_head[index]);
    GC_fo_entries++;
    GC_fnlz_roots.fo_head[index] = new_fo;
    UNLOCK();
}
Example #15
/* and invoke finalizers.						*/
void GC_finalize()
{
    struct disappearing_link * curr_dl, * prev_dl, * next_dl;
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr, real_link;
    size_t i;
    size_t dl_size = (log_dl_table_size == -1 ) ? 0 : (1 << log_dl_table_size);
    size_t fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);
    /* PLTSCHEME: for resetting the disappearing link */

    /* PLTSCHEME: it's important to "push roots again" before
       making disappearing links disappear, because this
       step includes marking from ephemerons whose keys are
       reachable. We want to mark before disappearing links
       are disappeared. */
    if (GC_push_last_roots_again) GC_push_last_roots_again();

  /* Make disappearing links disappear */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
	/* PLTSCHEME: skip late dls: */
	if (curr_dl->dl_kind == LATE_DL) {
	  prev_dl = curr_dl;
	  curr_dl = dl_next(curr_dl);
	  continue;
	}
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }

  /* PLTSCHEME: All eagers first */
  /* Enqueue for finalization all EAGER objects that are still		*/
  /* unreachable.							*/
    GC_bytes_finalized = 0;
    finalize_eagers(1);
    if (GC_push_last_roots_again) GC_push_last_roots_again();
    finalize_eagers(2);
    if (GC_push_last_roots_again) GC_push_last_roots_again();

  /* Mark all objects reachable via chains of 1 or more pointers	*/
  /* from finalizable objects.						*/
  /* PLTSCHEME: non-eager finalizations only (eagers already marked) */
#   ifdef PRINTSTATS
    GC_ASSERT(GC_mark_state == MS_NONE);
#   endif
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = fo_head[i]; curr_fo != 0; curr_fo = fo_next(curr_fo)) {
	if (!(curr_fo -> eager_level)) { /* PLTSCHEME */
	  real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
	  if (!GC_is_marked(real_ptr)) {
            (*(curr_fo -> fo_mark_proc))(real_ptr);
            while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK();
            if (GC_mark_state != MS_NONE) {
	      /* Mark stack overflowed. Very unlikely. */
#		ifdef PRINTSTATS
	      if (GC_mark_state != MS_INVALID) ABORT("Bad mark state");
	      GC_printf("Mark stack overflowed in finalization!!\n");
#		endif
	      /* Make mark bits consistent again.  Forget about	*/
	      /* finalizing this object for now.			*/
	      GC_set_mark_bit(real_ptr);
	      while (!GC_mark_some((ptr_t)0));
            }
#if 0
            if (GC_is_marked(real_ptr)) {
	      /* PLTSCHEME: we have some ok cycles (below a parent) */
	      printf("Finalization cycle involving %lx\n", real_ptr);
            }
#endif
	  }
	}
      }
    }
  /* Enqueue for finalization all objects that are still		*/
  /* unreachable.							*/
    /* GC_bytes_finalized = 0; */ /* PLTSCHEME: done above */
    for (i = 0; i < fo_size; i++) {
      curr_fo = fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_set_mark_bit(real_ptr);

            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (prev_fo == 0) {
                fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
            /* Add to list of objects awaiting finalization.	*/
              fo_set_next(curr_fo, GC_finalize_now);
              GC_finalize_now = curr_fo;
              /* unhide object pointer so any future collections will	*/
              /* see it.						*/
              curr_fo -> fo_hidden_base = 
              		(word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                 	curr_fo -> fo_object_size
              		+ sizeof(struct finalizable_object);
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

    /* Remove dangling disappearing links. */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
        real_link = GC_base((ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link));
        if (real_link != 0 && !GC_is_marked(real_link)) {
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }

    /* PLTSCHEME: late disappearing links */
    for (i = 0; i < dl_size; i++) {
      curr_dl = dl_head[i];
      prev_dl = 0;
      while (curr_dl != 0) {
	/* PLTSCHEME: only late dls: */
	if (curr_dl->dl_kind != LATE_DL) {
	  prev_dl = curr_dl;
	  curr_dl = dl_next(curr_dl);
	  continue;
	}
        real_ptr = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = (ptr_t)REVEAL_POINTER(curr_dl -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            next_dl = dl_next(curr_dl);
            if (prev_dl == 0) {
                dl_head[i] = next_dl;
            } else {
                dl_set_next(prev_dl, next_dl);
            }
            GC_clear_mark_bit((ptr_t)curr_dl);
            GC_dl_entries--;
            curr_dl = next_dl;
        } else {
            prev_dl = curr_dl;
            curr_dl = dl_next(curr_dl);
        }
      }
    }

    /* PLTSCHEME: */
    if (GC_custom_finalize)
      GC_custom_finalize();
}
Example #16
/* PLTSCHEME: eager finalization: */
static void finalize_eagers(int eager_level)
{
  struct finalizable_object * curr_fo, * prev_fo, * next_fo;
  struct finalizable_object * end_eager_mark;
  ptr_t real_ptr;
  int i;
  int fo_size = (log_fo_table_size == -1 ) ? 0 : (1 << log_fo_table_size);

  end_eager_mark = GC_finalize_now; /* PLTSCHEME */
  for (i = 0; i < fo_size; i++) {
    curr_fo = fo_head[i];
    prev_fo = 0;
    while (curr_fo != 0) {
      if (curr_fo -> eager_level == eager_level) {
	real_ptr = (ptr_t)REVEAL_POINTER(curr_fo -> fo_hidden_base);
	if (!GC_is_marked(real_ptr)) {
	  /* We assume that (non-eager) finalization orders do not
	     need to take into account connections through memory
	     with eager finalizations. Otherwise, this mark bit
	     could break the chain from one (non-eager) finalizable
	     to another. */
	  GC_set_mark_bit(real_ptr);
	  
	  /* Delete from hash table */
	  next_fo = fo_next(curr_fo);
	  if (prev_fo == 0) {
	    fo_head[i] = next_fo;
	  } else {
	    fo_set_next(prev_fo, next_fo);
	  }
	  GC_fo_entries--;
	  /* Add to list of objects awaiting finalization.	*/
	  fo_set_next(curr_fo, GC_finalize_now);
	  GC_finalize_now = curr_fo;
	  /* unhide object pointer so any future collections will	*/
	  /* see it.						*/
	  curr_fo -> fo_hidden_base = 
	    (word) REVEAL_POINTER(curr_fo -> fo_hidden_base);
	  GC_bytes_finalized +=
	    curr_fo -> fo_object_size
	      + sizeof(struct finalizable_object);
#	    ifdef PRINTSTATS
	  if (!GC_is_marked((ptr_t)curr_fo)) {
	    ABORT("GC_finalize: found accessible unmarked object\n");
	  }
#	    endif
	  curr_fo = next_fo;
	} else {
	  prev_fo = curr_fo;
	  curr_fo = fo_next(curr_fo);
	}
      } else {
	prev_fo = curr_fo;
	curr_fo = fo_next(curr_fo);
      }
    }
  }
  
  /* PLTSCHEME: Mark from queued eagers: */
  for (curr_fo = GC_finalize_now; curr_fo != end_eager_mark; curr_fo = fo_next(curr_fo)) {
    /* PLTSCHEME: if this is an eager finalization, then objects
       accessible from real_ptr need to be marked */
    if (curr_fo -> eager_level == eager_level) {
      (*(curr_fo -> fo_mark_proc))(curr_fo -> fo_hidden_base);
      while (!GC_mark_stack_empty()) MARK_FROM_MARK_STACK();
      if (GC_mark_state != MS_NONE) {
	/* Mark stack overflowed. Very unlikely. 
	   Everything's ok, though. Just mark from scratch. */
	while (!GC_mark_some((ptr_t)0));
      }
    }
  }
}