Example #1
  /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * curr_fo, * next_fo;
    ptr_t real_ptr;
    int i;
    int fo_size;

    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      GC_fnlz_roots.fo_head[i] = NULL;
      while (curr_fo != NULL) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          next_fo = fo_next(curr_fo);

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
          GC_fnlz_roots.finalize_now = curr_fo;

          /* unhide object pointer so any future collections will       */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
        }
    }
    GC_fo_entries = 0;  /* all entries deleted from the hash table */
  }
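Every one of these examples relies on the hidden-pointer idiom: a pointer kept only in GC_HIDE_POINTER form (an invertible transformation, the bitwise complement of the address in the stock gc.h) is invisible to the conservative scan, so holding the hidden value does not keep the object alive; GC_REVEAL_POINTER recovers the usable address whenever the collector needs it. The round-trip sketch below is not part of the collector sources; it assumes a typical installation where the public header is available as <gc.h> (on some systems it is <gc/gc.h>).

#include <stdio.h>
#include <gc.h>                 /* assumed install path; may be <gc/gc.h> */

int main(void)
{
    void *obj;
    GC_word hidden;

    GC_INIT();
    obj = GC_MALLOC(64);

    /* The hidden form no longer looks like a pointer to the             */
    /* conservative scanner, so storing only it does not keep obj alive. */
    hidden = GC_HIDE_POINTER(obj);

    /* GC_REVEAL_POINTER is the exact inverse.                           */
    printf("round trip ok: %d\n", GC_REVEAL_POINTER(hidden) == obj);
    return 0;
}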
Example #2
/* Clear (zero) every registered disappearing link whose target object */
/* is no longer marked, and remove the corresponding hash table entry. */
GC_INLINE void GC_make_disappearing_links_disappear(
                                struct dl_hashtbl_s* dl_hashtbl)
{
    struct disappearing_link *curr, *prev, *next;
    ptr_t real_ptr, real_link;

    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
        real_ptr = GC_REVEAL_POINTER(curr -> dl_hidden_obj);
        real_link = GC_REVEAL_POINTER(curr -> dl_hidden_link);
        if (!GC_is_marked(real_ptr)) {
            *(word *)real_link = 0;
            GC_clear_mark_bit(curr);
            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
        }
    ITERATE_DL_HASHTBL_END(curr, prev)
}
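GC_make_disappearing_links_disappear above is the collector-side half of the weak-reference mechanism; the client-side half is GC_general_register_disappearing_link. The sketch below is an illustration, not library code: it assumes the usual <gc.h> header, and because the collector is conservative, a stale copy of the address left on the stack or in a register can keep the object alive, so the clearing is not guaranteed on every run.

#include <stdio.h>
#include <gc.h>                        /* or <gc/gc.h> */

int main(void)
{
    void *obj;
    void **wp;

    GC_INIT();
    obj = GC_MALLOC(32);

    /* Keep the weak pointer in pointer-free (atomic) storage so that   */
    /* the stored address is not traced and does not keep obj alive.    */
    wp = (void **)GC_MALLOC_ATOMIC(sizeof(void *));
    *wp = obj;

    /* Ask the collector to zero *wp once obj becomes unreachable; this */
    /* is exactly the clearing performed by the loop shown above.       */
    GC_general_register_disappearing_link(wp, obj);

    obj = NULL;                        /* drop the strong reference     */
    GC_gcollect();

    printf("weak pointer after collection: %p\n", *wp);
    return 0;
}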
Example #3
  /* Print every registered disappearing link together with the object  */
  /* it refers to.                                                       */
  STATIC void GC_dump_finalization_links(struct dl_hashtbl_s* dl_hashtbl)
  {
    struct disappearing_link * curr_dl;
    ptr_t real_ptr, real_link;
    size_t dl_size = dl_hashtbl -> log_size == -1 ? 0 :
                                1 << dl_hashtbl -> log_size;
    size_t i;

    for (i = 0; i < dl_size; i++) {
      for (curr_dl = dl_hashtbl -> head[i]; curr_dl != 0;
           curr_dl = dl_next(curr_dl)) {
        real_ptr = GC_REVEAL_POINTER(curr_dl -> dl_hidden_obj);
        real_link = GC_REVEAL_POINTER(curr_dl -> dl_hidden_link);
        GC_printf("Object: %p, Link:%p\n", real_ptr, real_link);
      }
    }
  }
Example #4
  /* Enqueue all remaining finalizers to be run - Assumes lock is held. */
  STATIC void GC_enqueue_all_finalizers(void)
  {
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    register int i;
    int fo_size;

    fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
        curr_fo = GC_fo_head[i];
        prev_fo = 0;
      while (curr_fo != 0) {
          real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
          GC_set_mark_bit(real_ptr);

          /* Delete from hash table */
          next_fo = fo_next(curr_fo);
          if (prev_fo == 0) {
              GC_fo_head[i] = next_fo;
          } else {
              fo_set_next(prev_fo, next_fo);
          }
          GC_fo_entries--;

          /* Add to list of objects awaiting finalization.      */
          fo_set_next(curr_fo, GC_finalize_now);
          GC_finalize_now = curr_fo;

          /* unhide object pointer so any future collections will       */
          /* see it.                                                    */
          curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
          GC_bytes_finalized +=
                curr_fo -> fo_object_size + sizeof(struct finalizable_object);
          curr_fo = next_fo;
        }
    }
  }
Example #5
 /* Zero each weak toggle-ref whose referent was left unmarked by the  */
 /* preceding collection.                                               */
 STATIC void GC_clear_togglerefs(void)
 {
   int i;
   for (i = 0; i < GC_toggleref_array_size; ++i) {
     if ((GC_toggleref_arr[i].weak_ref & 1) != 0) {
       if (!GC_is_marked(GC_REVEAL_POINTER(GC_toggleref_arr[i].weak_ref))) {
         GC_toggleref_arr[i].weak_ref = 0;
       } else {
         /* No need to copy, BDWGC is a non-moving collector.    */
       }
     }
   }
 }
Example #6
/* Remove every disappearing-link entry whose link field lives inside  */
/* a heap object that is itself unreachable (about to be reclaimed).   */
GC_INLINE void GC_remove_dangling_disappearing_links(
                                struct dl_hashtbl_s* dl_hashtbl)
{
    struct disappearing_link *curr, *prev, *next;
    ptr_t real_link;

    ITERATE_DL_HASHTBL_BEGIN(dl_hashtbl, curr, prev)
        real_link = GC_base(GC_REVEAL_POINTER(curr -> dl_hidden_link));
        if (NULL != real_link && !GC_is_marked(real_link)) {
            GC_clear_mark_bit(curr);
            DELETE_DL_HASHTBL_ENTRY(dl_hashtbl, curr, prev, next);
        }
    ITERATE_DL_HASHTBL_END(curr, prev)
}
Example #7
  /* Store the location that references dest in *base_p and *offset_p.  */
  /* Dest can be any address within a heap object.                      */
  GC_API GC_ref_kind GC_CALL GC_get_back_ptr_info(void *dest, void **base_p,
                                                  size_t *offset_p)
  {
    oh * hdr = (oh *)GC_base(dest);
    ptr_t bp;
    ptr_t bp_base;

#   ifdef LINT2
      /* Explicitly instruct the code analysis tool that                */
      /* GC_get_back_ptr_info is not expected to be called with an      */
      /* incorrect "dest" value.                                        */
      if (!hdr) ABORT("Invalid GC_get_back_ptr_info argument");
#   endif
    if (!GC_HAS_DEBUG_INFO((ptr_t) hdr)) return GC_NO_SPACE;
    bp = GC_REVEAL_POINTER(hdr -> oh_back_ptr);
    if (MARKED_FOR_FINALIZATION == bp) return GC_FINALIZER_REFD;
    if (MARKED_FROM_REGISTER == bp) return GC_REFD_FROM_REG;
    if (NOT_MARKED == bp) return GC_UNREFERENCED;
#   if ALIGNMENT == 1
      /* Heuristically try to fix off by 1 errors we introduced by      */
      /* insisting on even addresses.                                   */
      {
        ptr_t alternate_ptr = bp + 1;
        ptr_t target = *(ptr_t *)bp;
        ptr_t alternate_target = *(ptr_t *)alternate_ptr;

        if (alternate_target >= GC_least_plausible_heap_addr
            && alternate_target <= GC_greatest_plausible_heap_addr
            && (target < GC_least_plausible_heap_addr
                || target > GC_greatest_plausible_heap_addr)) {
            bp = alternate_ptr;
        }
      }
#   endif
    bp_base = GC_base(bp);
    if (0 == bp_base) {
      *base_p = bp;
      *offset_p = 0;
      return GC_REFD_FROM_ROOT;
    } else {
      if (GC_HAS_DEBUG_INFO(bp_base)) bp_base += sizeof(oh);
      *base_p = bp_base;
      *offset_p = bp - bp_base;
      return GC_REFD_FROM_HEAP;
    }
  }
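GC_get_back_ptr_info is only meaningful when the collector was built with backward-pointer tracking (the gc_backptr.h interface, enabled by the KEEP_BACK_PTRS build option); otherwise it reports GC_NO_SPACE. The sketch below is an assumption-laden illustration of how a client might walk the recorded referrer chain; the header path and the idea of collecting once before querying reflect a typical debug build, not a documented contract.

#include <stdio.h>
#include <gc.h>
#include <gc_backptr.h>       /* often installed as <gc/gc_backptr.h> */

/* Follow the recorded back pointers from dest, printing one referrer  */
/* per step, until a root, a register, or a missing record is reached. */
static void print_referrer_chain(void *dest)
{
    int depth;

    for (depth = 0; depth < 10; ++depth) {
        void *base;
        size_t offset;
        GC_ref_kind kind = GC_get_back_ptr_info(dest, &base, &offset);

        switch (kind) {
        case GC_REFD_FROM_HEAP:
            printf("referenced from heap object %p at offset %lu\n",
                   base, (unsigned long)offset);
            dest = base;              /* continue one level up the chain */
            continue;
        case GC_REFD_FROM_ROOT:
            printf("referenced from root %p\n", base);
            return;
        case GC_REFD_FROM_REG:
            printf("referenced from a register\n");
            return;
        case GC_FINALIZER_REFD:
            printf("kept alive only for finalization\n");
            return;
        default:                      /* GC_UNREFERENCED or GC_NO_SPACE */
            printf("no (further) back pointer recorded\n");
            return;
        }
    }
    printf("chain deeper than 10 links; stopping\n");
}

int main(void)
{
    void **a, **b;

    GC_INIT();
    a = (void **)GC_MALLOC(sizeof(void *));
    b = (void **)GC_MALLOC(sizeof(void *));
    *a = b;                           /* a now references b              */
    GC_gcollect();                    /* back pointers are recorded      */
                                      /* during the mark phase           */
    print_referrer_chain(b);
    return 0;
}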
Example #8
/* Double the size of a chained hash table and rehash its entries,     */
/* updating both *table and *log_size_ptr.  Lock is held.              */
STATIC void GC_grow_table(struct hash_chain_entry ***table,
                          signed_word *log_size_ptr)
{
    register word i;
    register struct hash_chain_entry *p;
    signed_word log_old_size = *log_size_ptr;
    signed_word log_new_size = log_old_size + 1;
    word old_size = log_old_size == -1 ? 0 : (word)1 << log_old_size;
    word new_size = (word)1 << log_new_size;
    /* FIXME: Power of 2 size often gets rounded up to one more page. */
    struct hash_chain_entry **new_table;

    GC_ASSERT(I_HOLD_LOCK());
    new_table = (struct hash_chain_entry **)
                    GC_INTERNAL_MALLOC_IGNORE_OFF_PAGE(
                        (size_t)new_size * sizeof(struct hash_chain_entry *),
                        NORMAL);
    if (new_table == 0) {
        if (*table == 0) {
            ABORT("Insufficient space for initial table allocation");
        } else {
            return;
        }
    }
    for (i = 0; i < old_size; i++) {
      p = (*table)[i];
      while (p != 0) {
        ptr_t real_key = GC_REVEAL_POINTER(p -> hidden_key);
        struct hash_chain_entry *next = p -> next;
        size_t new_hash = HASH3(real_key, new_size, log_new_size);

        p -> next = new_table[new_hash];
        new_table[new_hash] = p;
        p = next;
      }
    }
    *log_size_ptr = log_new_size;
    *table = new_table;
}
Example #9
  /* Print the disappearing-link tables and all finalizable objects.    */
  void GC_dump_finalization(void)
  {
    struct finalizable_object * curr_fo;
    size_t fo_size = log_fo_table_size == -1 ? 0 : 1 << log_fo_table_size;
    ptr_t real_ptr;
    size_t i;

    GC_printf("Disappearing (short) links:\n");
    GC_dump_finalization_links(&GC_dl_hashtbl);
#   ifndef GC_LONG_REFS_NOT_NEEDED
      GC_printf("Disappearing long links:\n");
      GC_dump_finalization_links(&GC_ll_hashtbl);
#   endif
    GC_printf("Finalizers:\n");
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fo_head[i]; curr_fo != 0;
           curr_fo = fo_next(curr_fo)) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        GC_printf("Finalizable object: %p\n", real_ptr);
      }
    }
  }
Example #10
  /* Invoke the toggle-ref callback on each registered object and       */
  /* compact the array according to the status it returns.              */
  GC_INNER void GC_process_togglerefs(void)
  {
    int i;
    int new_size = 0;

    GC_ASSERT(I_HOLD_LOCK());
    for (i = 0; i < GC_toggleref_array_size; ++i) {
      GCToggleRef r = GC_toggleref_arr[i];
      void *obj = r.strong_ref;

      if (((word)obj & 1) != 0) {
        obj = GC_REVEAL_POINTER(r.weak_ref);
      }
      if (NULL == obj) {
        continue;
      }
      switch (GC_toggleref_callback(obj)) {
      case GC_TOGGLE_REF_DROP:
        break;
      case GC_TOGGLE_REF_STRONG:
        GC_toggleref_arr[new_size++].strong_ref = obj;
        break;
      case GC_TOGGLE_REF_WEAK:
        GC_toggleref_arr[new_size++].weak_ref = GC_HIDE_POINTER(obj);
        break;
      default:
        ABORT("Bad toggle-ref status returned by callback");
      }
    }

    if (new_size < GC_toggleref_array_size) {
      BZERO(&GC_toggleref_arr[new_size],
            (GC_toggleref_array_size - new_size) * sizeof(GCToggleRef));
      GC_toggleref_array_size = new_size;
    }
  }
Example #11
/* Finalization pass: finalizable objects that are still unreachable   */
/* are enqueued for finalization; the queued finalizers run later,     */
/* outside of GC_finalize itself.                                       */
GC_INNER void GC_finalize(void)
{
    struct finalizable_object * curr_fo, * prev_fo, * next_fo;
    ptr_t real_ptr;
    size_t i;
    size_t fo_size = log_fo_table_size == -1 ? 0 :
                                (size_t)1 << log_fo_table_size;

#   ifndef SMALL_CONFIG
      /* Save current GC_[dl/ll]_entries value for stats printing */
      GC_old_dl_entries = GC_dl_hashtbl.entries;
#     ifndef GC_LONG_REFS_NOT_NEEDED
        GC_old_ll_entries = GC_ll_hashtbl.entries;
#     endif
#   endif

#   ifndef GC_TOGGLE_REFS_NOT_NEEDED
      GC_mark_togglerefs();
#   endif
    GC_make_disappearing_links_disappear(&GC_dl_hashtbl);

  /* Mark all objects reachable via chains of 1 or more pointers        */
  /* from finalizable objects.                                          */
    GC_ASSERT(GC_mark_state == MS_NONE);
    for (i = 0; i < fo_size; i++) {
      for (curr_fo = GC_fnlz_roots.fo_head[i];
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        GC_ASSERT(GC_size(curr_fo) >= sizeof(struct finalizable_object));
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            GC_MARKED_FOR_FINALIZATION(real_ptr);
            GC_MARK_FO(real_ptr, curr_fo -> fo_mark_proc);
            if (GC_is_marked(real_ptr)) {
                WARN("Finalization cycle involving %p\n", real_ptr);
            }
        }
      }
    }
  /* Enqueue for finalization all objects that are still                */
  /* unreachable.                                                       */
    GC_bytes_finalized = 0;
    for (i = 0; i < fo_size; i++) {
      curr_fo = GC_fnlz_roots.fo_head[i];
      prev_fo = 0;
      while (curr_fo != 0) {
        real_ptr = GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
        if (!GC_is_marked(real_ptr)) {
            if (!GC_java_finalization) {
              GC_set_mark_bit(real_ptr);
            }
            /* Delete from hash table */
              next_fo = fo_next(curr_fo);
              if (NULL == prev_fo) {
                GC_fnlz_roots.fo_head[i] = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              GC_fo_entries--;
              if (GC_object_finalized_proc)
                GC_object_finalized_proc(real_ptr);

            /* Add to list of objects awaiting finalization.    */
              fo_set_next(curr_fo, GC_fnlz_roots.finalize_now);
              GC_fnlz_roots.finalize_now = curr_fo;
              /* unhide object pointer so any future collections will   */
              /* see it.                                                */
              curr_fo -> fo_hidden_base =
                        (word)GC_REVEAL_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized +=
                        curr_fo -> fo_object_size
                        + sizeof(struct finalizable_object);
            GC_ASSERT(GC_is_marked(GC_base(curr_fo)));
            curr_fo = next_fo;
        } else {
            prev_fo = curr_fo;
            curr_fo = fo_next(curr_fo);
        }
      }
    }

  if (GC_java_finalization) {
    /* make sure we mark everything reachable from objects finalized
       using the no_order mark_proc */
      for (curr_fo = GC_fnlz_roots.finalize_now;
           curr_fo != NULL; curr_fo = fo_next(curr_fo)) {
        real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
        if (!GC_is_marked(real_ptr)) {
            if (curr_fo -> fo_mark_proc == GC_null_finalize_mark_proc) {
                GC_MARK_FO(real_ptr, GC_normal_finalize_mark_proc);
            }
            if (curr_fo -> fo_mark_proc != GC_unreachable_finalize_mark_proc) {
                GC_set_mark_bit(real_ptr);
            }
        }
      }

    /* now revive finalize-when-unreachable objects reachable from
       other finalizable objects */
      if (need_unreachable_finalization) {
        curr_fo = GC_fnlz_roots.finalize_now;
        prev_fo = NULL;
        while (curr_fo != NULL) {
          next_fo = fo_next(curr_fo);
          if (curr_fo -> fo_mark_proc == GC_unreachable_finalize_mark_proc) {
            real_ptr = (ptr_t)curr_fo -> fo_hidden_base;
            if (!GC_is_marked(real_ptr)) {
              GC_set_mark_bit(real_ptr);
            } else {
              if (NULL == prev_fo) {
                GC_fnlz_roots.finalize_now = next_fo;
              } else {
                fo_set_next(prev_fo, next_fo);
              }
              curr_fo -> fo_hidden_base =
                                GC_HIDE_POINTER(curr_fo -> fo_hidden_base);
              GC_bytes_finalized -=
                  curr_fo->fo_object_size + sizeof(struct finalizable_object);

              i = HASH2(real_ptr, log_fo_table_size);
              fo_set_next(curr_fo, GC_fnlz_roots.fo_head[i]);
              GC_fo_entries++;
              GC_fnlz_roots.fo_head[i] = curr_fo;
              curr_fo = prev_fo;
            }
          }
          prev_fo = curr_fo;
          curr_fo = next_fo;
        }
      }
  }

  GC_remove_dangling_disappearing_links(&GC_dl_hashtbl);
# ifndef GC_TOGGLE_REFS_NOT_NEEDED
    GC_clear_togglerefs();
# endif
# ifndef GC_LONG_REFS_NOT_NEEDED
    GC_make_disappearing_links_disappear(&GC_ll_hashtbl);
    GC_remove_dangling_disappearing_links(&GC_ll_hashtbl);
# endif

  if (GC_fail_count) {
    /* Don't prevent running finalizers if there has been an allocation */
    /* failure recently.                                                */
#   ifdef THREADS
      GC_reset_finalizer_nested();
#   else
      GC_finalizer_nested = 0;
#   endif
  }
}
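GC_finalize only moves entries onto the finalize_now queue; the queued finalizers run later, outside the collector, either automatically after an allocation or when the client calls GC_invoke_finalizers. The registration sketch below is illustrative, assuming the usual <gc.h> header and default build settings; with a conservative collector a leftover copy of the address may delay finalization past the first collection.

#include <stdio.h>
#include <gc.h>                          /* or <gc/gc.h> */

static void GC_CALLBACK on_finalize(void *obj, void *client_data)
{
    /* Runs from the finalization queue that GC_finalize filled in.     */
    printf("finalizing %p (%s)\n", obj, (const char *)client_data);
}

int main(void)
{
    void *obj;

    GC_INIT();
    obj = GC_MALLOC(128);

    /* Register the callback; the previous finalizer and its client     */
    /* data are not needed here, so the last two arguments are NULL.    */
    GC_register_finalizer(obj, on_finalize, (void *)"demo object",
                          NULL, NULL);

    obj = NULL;                          /* make the object unreachable */
    GC_gcollect();                       /* GC_finalize enqueues it     */
    GC_invoke_finalizers();              /* drain anything still queued */
    return 0;
}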