Example #1
  Object* ObjectMemory::allocate_object_mature(size_t bytes) {
    Object* obj;

    if(bytes > large_object_threshold) {
      obj = mark_sweep_->allocate(bytes, &collect_mature_now);
      if(collect_mature_now) {
        state->interrupts.set_perform_gc();
      }

#ifdef RBX_GC_STATS
    stats::GCStats::get()->large_objects++;
#endif

    } else {
      obj = immix_->allocate(bytes);
      if(collect_mature_now) {
        state->interrupts.set_perform_gc();
      }
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during mature allocation\n";
    }
#endif

    obj->clear_fields(bytes);
    return obj;
  }
Example #2
  Object* ObjectMemory::allocate_object_mature(size_t bytes) {

    Object* obj;

    if(bytes > large_object_threshold) {
      obj = mark_sweep_->allocate(bytes, &collect_mature_now);
      if(unlikely(!obj)) return NULL;
    } else {
      obj = immix_->allocate(bytes);

      if(unlikely(!obj)) {
        obj = mark_sweep_->allocate(bytes, &collect_mature_now);
      }

      gc_stats.mature_object_allocated(bytes);
    }

    if(collect_mature_now) shared_.gc_soon();

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during mature allocation\n";
    }
#endif

    return obj;
  }
Example #3
  Object* ObjectMemory::new_object_typed_enduring(Class* cls, size_t bytes, object_type type) {
#ifdef RBX_GC_STATS
    stats::GCStats::get()->mature_object_types[type]++;
#endif

    Object* obj = mark_sweep_->allocate(bytes, &collect_mature_now);
    if(collect_mature_now) {
      state->interrupts.set_perform_gc();
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during enduring allocation\n";
    }
#endif

    obj->clear_fields(bytes);

#ifdef RBX_GC_STATS
    stats::GCStats::get()->large_objects++;
#endif

    obj->klass(this, cls);

    obj->set_obj_type(type);
    obj->set_requires_cleanup(type_info[type]->instances_need_cleanup);

    return obj;
  }
Example #4
  Object* ImmixGC::saw_object(Object* obj) {
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during immix scanning.\n";
    }

    immix::Address fwd = gc_.mark_address(immix::Address(obj), allocator_);
    return fwd.as<Object>();
  }
Example #5
  /**
   * Called for each object in the young generation that is seen during garbage
   * collection. An object is seen by scanning from the root objects to all
   * reachable objects. Therefore, only reachable objects will be seen, and
   * reachable objects may be seen more than once.
   *
   * @returns the new address for the object, so that the source reference can
   * be updated when the object has been moved.
   */
  Object* BakerGC::saw_object(Object* obj) {
    Object* copy;

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during baker collection\n";
    }
#endif

    if(!obj->reference_p()) return obj;

    if(!obj->young_object_p()) return obj;

    if(obj->forwarded_p()) return obj->forward();

    // This object is already in the next space, we don't want to
    // copy it again!
    if(next->contains_p(obj)) return obj;

    if(unlikely(obj->inc_age() >= lifetime_)) {
      copy = object_memory_->promote_object(obj);

      promoted_push(copy);
    } else if(likely(next->enough_space_p(
                obj->size_in_bytes(object_memory_->state())))) {
      copy = next->move_object(object_memory_->state(), obj);
      total_objects++;
    } else {
      copy_spills_++;
      copy = object_memory_->promote_object(obj);
      promoted_push(copy);
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(copy)) {
      std::cout << "detected " << copy << " during baker collection (2)\n";
    }
#endif

    return copy;
  }
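
The comment above notes that saw_object() returns the object's new address so the source reference can be updated. A minimal caller-side sketch of that contract (RootSlot and update_roots are hypothetical names, not Rubinius source; the root-walking loop in Example #28 does the same thing with Roots::Iterator):

  // Every slot that might point at a young object is passed through
  // saw_object(), and the slot is overwritten with the returned address,
  // so a copying collector can move objects without breaking references.
  #include <vector>

  struct Object;                      // opaque stand-in for the VM's Object
  Object* saw_object(Object* obj);    // assumed: returns the new address

  struct RootSlot { Object* ref; };   // hypothetical root holder

  void update_roots(std::vector<RootSlot>& roots) {
    for(RootSlot& slot : roots) {
      slot.ref = saw_object(slot.ref);
    }
  }
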
Example #6
    void drain_stack() {
      while(!stack_.empty()) {
        Object* obj = stack_.back();
        stack_.pop_back();

        if(watched_p(obj)) {
          std::cout << "detected " << obj << " in unmarking stack.\n";
        }

        visit_object(obj);
      }
    }
Example #7
    Object* call(Object* obj) {
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during unmarking.\n";
      }

      if(obj->reference_p() && obj->marked_p(object_memory_->mark())) {
        obj->clear_mark();
        stack_.push_back(obj);
      }

      return obj;
    }
Example #8
  Object* BakerGC::saw_object(Object* obj) {
    Object* copy;

    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during baker collection\n";
    }

    if(!obj->reference_p()) return obj;

    if(obj->zone != YoungObjectZone) return obj;

    if(obj->forwarded_p()) return obj->forward();

    // This object is already in the next space, we don't want to
    // copy it again!
    // TODO test this!
    if(next->contains_p(obj)) return obj;

    if(unlikely(obj->age++ >= lifetime)) {
      copy = object_memory->promote_object(obj);

      promoted_push(copy);
    } else if(likely(next->enough_space_p(obj->size_in_bytes(object_memory->state)))) {
      copy = next->copy_object(object_memory->state, obj);
      total_objects++;
    } else {
      copy = object_memory->promote_object(obj);
      promoted_push(copy);
    }

    if(watched_p(copy)) {
      std::cout << "detected " << copy << " during baker collection (2)\n";
    }

    obj->set_forward(copy);
    return copy;
  }
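
Example #8's version checks obj->forwarded_p() before copying and installs a forwarding pointer with set_forward(copy) afterwards. A minimal standalone sketch of that idiom (assumed field names; Rubinius actually packs this state into the object header):

  // The first time a live object is seen it is copied and the old body
  // records the new address; every later sighting follows that pointer,
  // so an object referenced from many places is copied exactly once.
  struct Obj {
    Obj* forward = nullptr;                      // hypothetical header slot
    bool forwarded_p() const { return forward != nullptr; }
    void set_forward(Obj* copy) { forward = copy; }
  };

  Obj* copy_object(Obj* obj);                    // assumed: moves obj

  Obj* saw(Obj* obj) {
    if(obj->forwarded_p()) return obj->forward;  // already moved: reuse copy
    Obj* copy = copy_object(obj);
    obj->set_forward(copy);                      // remember where it went
    return copy;
  }
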
Example #9
  Object* ObjectMemory::promote_object(Object* obj) {
#ifdef RBX_GC_STATS
    stats::GCStats::get()->objects_promoted++;
#endif

    Object* copy = immix_->allocate(obj->size_in_bytes(state));

    copy->set_obj_type(obj->type_id());
    copy->initialize_full_state(state, obj, 0);

    if(watched_p(obj)) {
      std::cout << "detected object " << obj << " during promotion.\n";
    }

    return copy;
  }
Example #10
  Object* ImmixGC::saw_object(Object* obj) {
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during immix scanning.\n";
    }

    immix::Address fwd = gc_.mark_address(immix::Address(obj), allocator_);
    Object* copy = fwd.as<Object>();

    // Check and update an inflated header
    if(copy && copy != obj && obj->inflated_header_p()) {
      InflatedHeader* ih = obj->deflate_header();
      ih->reset_object(copy);
      copy->set_inflated_header(ih);
    }

    return copy;
  }
Example #11
  Object* ObjectMemory::allocate_object(size_t bytes) {
    objects_allocated++;
    bytes_allocated += bytes;

    Object* obj;

    if(unlikely(bytes > large_object_threshold)) {
      obj = mark_sweep_->allocate(bytes, &collect_mature_now);
      if(unlikely(!obj)) return NULL;

      if(collect_mature_now) {
        state->interrupts.set_perform_gc();
      }

#ifdef RBX_GC_STATS
    stats::GCStats::get()->large_objects++;
#endif

    } else {
      obj = young_->allocate(bytes, &collect_young_now);
      if(unlikely(obj == NULL)) {
        collect_young_now = true;
        state->interrupts.set_perform_gc();

        obj = immix_->allocate(bytes);

        if(unlikely(!obj)) {
          obj = mark_sweep_->allocate(bytes, &collect_mature_now);
        }

        if(collect_mature_now) {
          state->interrupts.set_perform_gc();
        }
      }
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during allocation\n";
    }
#endif

    obj->clear_fields(bytes);
    return obj;
  }
Example #12
  Object* ImmixGC::saw_object(Object* obj) {
#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during immix scanning.\n";
    }
#endif

    if(!obj->reference_p()) return obj;

    memory::Address fwd = gc_.mark_address(memory::Address(obj), allocator_);
    Object* copy = fwd.as<Object>();

    // Install the forwarding pointer if the object moved
    if(copy && copy != obj) {
      obj->set_forward(copy);
    }

    return copy;
  }
Example #13
  /**
   * Scans the specified Object +obj+ for references to other Objects, and
   * marks those Objects as reachable. Understands how to read the inside of
   * an Object and find all references located within. For each reference
   * found, it marks the object pointed to as live (which may trigger
   * movement of the object in a copying garbage collector), but does not
   * recursively scan into the referenced object (since such recursion could
   * be arbitrarily deep, depending on the object graph, and this could cause
   * the stack to blow up).
   * @param obj The Object to be scanned for references to other Objects.
   */
  void GarbageCollector::scan_object(Object* obj) {
    Object* slot;

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during scan_object.\n";
    }
#endif

    // Check and update an inflated header
    if(obj->inflated_header_p()) {
      obj->inflated_header()->reset_object(obj);
    }

    slot = saw_object(obj->klass());
    if(slot) obj->klass(object_memory_, force_as<Class>(slot));

    if(obj->ivars()->reference_p()) {
      slot = saw_object(obj->ivars());
      if(slot) obj->ivars(object_memory_, slot);
    }

    // Handle Tuple directly, because it's so common
    if(Tuple* tup = try_as<Tuple>(obj)) {
      int size = tup->num_fields();

      for(int i = 0; i < size; i++) {
        slot = tup->field[i];
        if(slot->reference_p()) {
          slot = saw_object(slot);
          if(slot) {
            tup->field[i] = slot;
            object_memory_->write_barrier(tup, slot);
          }
        }
      }
    } else {
      TypeInfo* ti = object_memory_->type_info[obj->type_id()];

      ObjectMark mark(this);
      ti->mark(obj, mark);
    }
  }
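
The comment above stresses that scan_object() does not recurse into the objects it marks; Example #6's drain_stack() shows the worklist half of that design. A sketch (assumed names, not Rubinius source) of how the two halves combine into a non-recursive mark phase:

  // saw_object() pushes newly-discovered objects onto an explicit stack,
  // and this loop drains it, so the C++ call stack stays shallow no
  // matter how deep the object graph is.
  #include <vector>

  struct Object;
  void scan_object(Object* obj);      // assumed: marks obj's direct refs
                                      // and pushes unmarked ones

  std::vector<Object*> mark_stack;    // hypothetical worklist

  void mark_all(const std::vector<Object*>& roots) {
    mark_stack.assign(roots.begin(), roots.end());
    while(!mark_stack.empty()) {
      Object* obj = mark_stack.back();
      mark_stack.pop_back();
      scan_object(obj);               // may push more work
    }
  }
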
Example #14
  Object* ObjectMemory::allocate_object(size_t bytes) {

    Object* obj;

    if(unlikely(bytes > large_object_threshold)) {
      obj = mark_sweep_->allocate(bytes, &collect_mature_now);
      if(unlikely(!obj)) return NULL;

      state()->metrics().m.ruby_metrics.memory_immix_objects_total++;
      state()->metrics().m.ruby_metrics.memory_immix_bytes_total += bytes;

      if(collect_mature_now) shared_.gc_soon();

    } else {
      obj = young_->allocate(bytes, &collect_young_now);
      if(unlikely(obj == NULL)) {
        collect_young_now = true;
        shared_.gc_soon();

        obj = immix_->allocate(bytes);

        if(unlikely(!obj)) {
          obj = mark_sweep_->allocate(bytes, &collect_mature_now);
        }

        state()->metrics().m.ruby_metrics.memory_immix_objects_total++;
        state()->metrics().m.ruby_metrics.memory_immix_bytes_total += bytes;

        if(collect_mature_now) shared_.gc_soon();
      } else {
        state()->metrics().m.ruby_metrics.memory_young_objects_total++;
        state()->metrics().m.ruby_metrics.memory_young_bytes_total += bytes;
      }
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during allocation\n";
    }
#endif

    return obj;
  }
Example #15
  Object* ObjectMemory::promote_object(Object* obj) {

    size_t sz = obj->size_in_bytes(root_state_);

    Object* copy = immix_->move_object(obj, sz);

    gc_stats.promoted_object_allocated(sz);
    if(unlikely(!copy)) {
      copy = mark_sweep_->move_object(obj, sz, &collect_mature_now);
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected object " << obj << " during promotion.\n";
    }
#endif

    return copy;
  }
Example #16
  /**
   * Scans the specified Object +obj+ for references to other Objects, and
   * marks those Objects as reachable. Understands how to read the inside of
   * an Object and find all references located within. For each reference
   * found, it marks the object pointed to as live (which may trigger
   * movement of the object in a copying garbage collector), but does not
   * recursively scan into the referenced object (since such recursion could
   * be arbitrarily deep, depending on the object graph, and this could cause
   * the stack to blow up).
   * @param obj The Object to be scanned for references to other Objects.
   */
  void GarbageCollector::scan_object(Object* obj) {
#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during scan_object.\n";
    }
#endif
    // We mark the object as scanned before we actually finish scanning
    // it. This avoids a race in which another thread updates a field
    // while the object is only partially scanned.
    scanned_object(obj);

    if(Object* klass = saw_object(obj->klass())) {
      obj->klass(object_memory_, force_as<Class>(klass));
    }

    if(obj->ivars()->reference_p()) {
      if(Object* ivars = saw_object(obj->ivars())) {
        obj->ivars(object_memory_, ivars);
      }
    }

    // Handle Tuple directly, because it's so common
    if(Tuple* tup = try_as<Tuple>(obj)) {
      native_int size = tup->num_fields();

      for(native_int i = 0; i < size; i++) {
        Object* slot = tup->field[i];
        if(slot->reference_p()) {
          if(Object* moved = saw_object(slot)) {
            tup->field[i] = moved;
            object_memory_->write_barrier(tup, moved);
          }
        }
      }
    } else {
      TypeInfo* ti = object_memory_->type_info[obj->type_id()];

      ObjectMark mark(this);
      ti->mark(obj, mark);
    }
  }
Example #17
  Object* ObjectMemory::new_object_typed_enduring_dirty(STATE, Class* cls, size_t bytes, object_type type) {
    utilities::thread::SpinLock::LockGuard guard(allocation_lock_);

    Object* obj = mark_sweep_->allocate(bytes, &collect_mature_now);
    gc_stats.mature_object_allocated(bytes);

    if(collect_mature_now) shared_.gc_soon();

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during enduring allocation\n";
    }
#endif

    obj->klass(this, cls);
    obj->ivars(this, cNil);
    obj->set_obj_type(type);

    return obj;
  }
Example #18
    /* Inline methods */
    Object* allocate(size_t bytes, bool *collect_now) {
      Object* obj;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->young_bytes_allocated += bytes;
      stats::GCStats::get()->allocate_young.start();
#endif

      if(!current->enough_space_p(bytes)) {
#if 0
        if(!next->enough_space_p(bytes)) {
          return NULL;
        } else {
          total_objects++;
          obj = (Object*)next->allocate(bytes);
        }
#endif
        *collect_now = true;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

        return NULL;
      } else {
        total_objects++;
        obj = (Object*)current->allocate(bytes);
      }

      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }

      obj->init_header(YoungObjectZone, bytes);

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

      return obj;
    }
Example #19
  Object* ObjectMemory::allocate_object(size_t bytes) {

    Object* obj;

    if(unlikely(bytes > large_object_threshold)) {
      obj = mark_sweep_->allocate(bytes, &collect_mature_now);
      if(unlikely(!obj)) return NULL;

      gc_stats.mature_object_allocated(bytes);

      if(collect_mature_now) shared_.gc_soon();

    } else {
      obj = young_->allocate(bytes, &collect_young_now);
      if(unlikely(obj == NULL)) {
        collect_young_now = true;
        shared_.gc_soon();

        obj = immix_->allocate(bytes);

        if(unlikely(!obj)) {
          obj = mark_sweep_->allocate(bytes, &collect_mature_now);
        }

        gc_stats.mature_object_allocated(bytes);

        if(collect_mature_now) shared_.gc_soon();
      } else {
        gc_stats.young_object_allocated(bytes);
      }
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during allocation\n";
    }
#endif

    obj->clear_fields(bytes);
    return obj;
  }
Example #20
    /**
     * Attempts to allocate an object of the specified size from the Eden heap.
     * Unlike allocate, the header of the returned object is not initialized.
     *
     * If there is insufficient space remaining, NULL is returned and the
     * limit_hit parameter is set to true.
     */
    Object* raw_allocate(size_t bytes, bool* limit_hit) {
      Object* obj;

      if(!eden->enough_space_p(bytes)) {
        return NULL;
      } else {
        obj = eden->allocate(bytes).as<Object>();

        if(eden->over_limit_p(obj)) {
          *limit_hit = true;
        }
      }

#ifdef ENABLE_OBJECT_WATCH
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }
#endif

      return obj;
    }
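
A hypothetical caller illustrating the contract documented above: NULL means Eden is full and a young collection is needed, while limit_hit reports that the allocation succeeded but crossed Eden's soft limit (request_young_gc is an assumed hook, not a Rubinius API):

  #include <cstddef>

  struct Object;
  Object* raw_allocate(size_t bytes, bool* limit_hit);
  void request_young_gc();            // assumed: schedules a young-gen GC

  Object* allocate_young(size_t bytes) {
    bool limit_hit = false;
    Object* obj = raw_allocate(bytes, &limit_hit);

    if(!obj) {
      request_young_gc();             // Eden exhausted: collect, then the
      return NULL;                    // caller falls back to another space
    }

    if(limit_hit) request_young_gc(); // over the soft limit: collect soon,
    return obj;                       // but keep the object we just got
  }
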
Example #21
  Object* ObjectMemory::promote_object(Object* obj) {

    size_t sz = obj->size_in_bytes(root_state_);

    Object* copy = immix_->move_object(obj, sz);

    state()->metrics().m.ruby_metrics.memory_promoted_objects_total++;
    state()->metrics().m.ruby_metrics.memory_promoted_bytes_total += sz;

    if(unlikely(!copy)) {
      copy = mark_sweep_->move_object(obj, sz, &collect_mature_now);
    }

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected object " << obj << " during promotion.\n";
    }
#endif

    copy->clear_mark();
    return copy;
  }
Example #22
    /* Inline methods */
    Object* raw_allocate(size_t bytes, bool* limit_hit) {
      Object* obj;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->young_bytes_allocated += bytes;
      stats::GCStats::get()->allocate_young.start();
#endif

      if(!eden.enough_space_p(bytes)) {
#ifdef RBX_GC_STATS
       stats::GCStats::get()->allocate_young.stop();
#endif
        return NULL;
      } else {
        lock_.lock();

        total_objects++;
        obj = (Object*)eden.allocate(bytes);

        lock_.unlock();

        if(eden.over_limit_p(obj)) {
          *limit_hit = true;
        }
      }

#ifdef ENABLE_OBJECT_WATCH
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }
#endif

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

      return obj;
    }
Example #23
  Object* ObjectMemory::new_object_typed_enduring_dirty(STATE, Class* cls, size_t bytes, object_type type) {
    utilities::thread::SpinLock::LockGuard guard(allocation_lock_);

    Object* obj = mark_sweep_->allocate(bytes, &collect_mature_now);
    if(unlikely(!obj)) return NULL;

    state->vm()->metrics().m.ruby_metrics.memory_immix_objects_total++;
    state->vm()->metrics().m.ruby_metrics.memory_immix_bytes_total += bytes;

    if(collect_mature_now) shared_.gc_soon();

#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during enduring allocation\n";
    }
#endif

    obj->set_obj_type(type);
    obj->klass(this, cls);
    obj->ivars(this, cNil);

    return obj;
  }
Example #24
  Object* ImmixGC::saw_object(Object* obj) {
#ifdef ENABLE_OBJECT_WATCH
    if(watched_p(obj)) {
      std::cout << "detected " << obj << " during immix scanning.\n";
    }
#endif

    if(!obj->reference_p()) return obj;

    memory::Address fwd = gc_.mark_address(memory::Address(obj), allocator_);
    Object* copy = fwd.as<Object>();

    // Check and update an inflated header
    if(copy && copy != obj && obj->inflated_header_p()) {
      InflatedHeader* ih = obj->deflate_header();
      ih->reset_object(copy);
      if(!copy->set_inflated_header(ih)) {
        rubinius::bug("Massive IMMIX inflated header screwup.");
      }
    }

    return copy;
  }
Example #25
    /**
     * Attempts to allocate an object of the specified size from the Eden heap.
     *
     * If successful, the returned object's header is initialized to the young
     * generation.
     *
     * If there is insufficient space remaining, NULL is returned and the
     * limit_hit parameter is set to true.
     */
    Object* allocate(size_t bytes, bool* limit_hit) {
      Object* obj;

      if(!eden.enough_space_p(bytes)) {
        return NULL;
      } else {
        total_objects++;
        obj = eden.allocate(bytes).as<Object>();

        if(eden.over_limit_p(obj)) {
          *limit_hit = true;
        }
      }

#ifdef ENABLE_OBJECT_WATCH
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }
#endif

      obj->init_header(YoungObjectZone, InvalidType);

      return obj;
    }
Example #26
  Object* ObjectMemory::promote_object(Object* obj) {
#ifdef RBX_GC_STATS
    stats::GCStats::get()->objects_promoted++;
#endif

    objects_allocated++;
    size_t sz = obj->size_in_bytes(state);
    bytes_allocated += sz;

    Object* copy = immix_->allocate(sz);

    if(unlikely(!copy)) {
      copy = mark_sweep_->allocate(sz, &collect_mature_now);
    }

    copy->set_obj_type(obj->type_id());
    copy->initialize_full_state(state, obj, 0);

    if(watched_p(obj)) {
      std::cout << "detected object " << obj << " during promotion.\n";
    }

    return copy;
  }
Example #27
    Object* allocate(size_t bytes) {
      Object* obj;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->young_bytes_allocated += bytes;
      stats::GCStats::get()->allocate_young.start();
#endif

      if(!eden.enough_space_p(bytes)) {
#ifdef RBX_GC_STATS
       stats::GCStats::get()->allocate_young.stop();
#endif
        return NULL;
      } else {
        lock_.lock();

        total_objects++;
        obj = (Object*)eden.allocate(bytes);

        lock_.unlock();
      }

#ifdef ENABLE_OBJECT_WATCH
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }
#endif

      obj->init_header(YoungObjectZone, InvalidType);

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

      return obj;
    }
Example #28
  /* Perform garbage collection on the young objects. */
  void BakerGC::collect(GCData& data) {
#ifdef RBX_GC_STATS
    stats::GCStats::get()->bytes_copied.start();
    stats::GCStats::get()->objects_copied.start();
    stats::GCStats::get()->objects_promoted.start();
    stats::GCStats::get()->collect_young.start();
#endif

    Object* tmp;
    ObjectArray *current_rs = object_memory->remember_set;

    object_memory->remember_set = new ObjectArray(0);
    total_objects = 0;

    // Tracks all objects that we promoted during this run, so
    // we can scan them at the end.
    promoted_ = new ObjectArray(0);

    promoted_current = promoted_insert = promoted_->begin();

    for(ObjectArray::iterator oi = current_rs->begin();
        oi != current_rs->end();
        ++oi) {
      tmp = *oi;
      // unremember_object throws a NULL in to remove an object
      // so we don't have to compact the set in unremember
      if(tmp) {
        assert(tmp->zone == MatureObjectZone);
        assert(!tmp->forwarded_p());

        // Remove the Remember bit, since we're clearing the set.
        tmp->clear_remember();
        scan_object(tmp);
      }
    }

    delete current_rs;

    for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
      tmp = i->get();
      if(tmp->reference_p() && tmp->young_object_p()) {
        i->set(saw_object(tmp));
      }
    }

    for(VariableRootBuffers::Iterator i(data.variable_buffers());
        i.more(); i.advance()) {
      Object*** buffer = i->buffer();
      for(int idx = 0; idx < i->size(); idx++) {
        Object** var = buffer[idx];
        Object* tmp = *var;

        if(tmp->reference_p() && tmp->young_object_p()) {
          *var = saw_object(tmp);
        }
      }
    }

    // Walk all the call frames
    for(CallFrameLocationList::iterator i = data.call_frames().begin();
        i != data.call_frames().end();
        i++) {
      CallFrame** loc = *i;
      walk_call_frame(*loc);
    }

    /* Ok, now handle all promoted objects. This is set up a little
     * strangely, so it deserves an explanation.
     *
     * We want to scan each promoted object. But scanning will likely
     * cause more objects to be promoted. Adding to an ObjectArray that
     * you're iterating over invalidates the iterators, so instead we
     * rotate the current promoted set out as we iterate over it, and
     * stick an empty ObjectArray in its place (see the standalone sketch
     * after this function).
     *
     * This way, when no more objects are being promoted, the last
     * ObjectArray will be empty.
     */

    promoted_current = promoted_insert = promoted_->begin();

    while(promoted_->size() > 0 || !fully_scanned_p()) {
      if(promoted_->size() > 0) {
        for(;promoted_current != promoted_->end();
            ++promoted_current) {
          tmp = *promoted_current;
          assert(tmp->zone == MatureObjectZone);
          scan_object(tmp);
          if(watched_p(tmp)) {
            std::cout << "detected " << tmp << " during scan of promoted objects.\n";
          }
        }

        promoted_->resize(promoted_insert - promoted_->begin());
        promoted_current = promoted_insert = promoted_->begin();

      }

      /* As we're handling promoted objects, also handle unscanned objects.
       * Scanning these unscanned objects (via the scan pointer) will
       * cause more promotions. */
      copy_unscanned();
    }

    assert(promoted_->size() == 0);

    delete promoted_;
    promoted_ = NULL;

    assert(fully_scanned_p());

    /* Anything that is going to be found has been found by now, so we go
     * back and look at everything in current and call delete_object() on
     * anything that's not been forwarded. */
    find_lost_souls();

    /* Check any weakrefs and replace dead objects with nil */
    clean_weakrefs(true);

    /* Swap the 2 halves */
    Heap *x = next;
    next = current;
    current = x;
    next->reset();

#ifdef RBX_GC_STATS
    stats::GCStats::get()->collect_young.stop();
    stats::GCStats::get()->objects_copied.stop();
    stats::GCStats::get()->objects_promoted.stop();
    stats::GCStats::get()->bytes_copied.stop();
#endif
  }
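
A standalone sketch of the rotation trick described in the comment inside collect() above (std::vector and the names below are assumptions; Rubinius uses ObjectArray plus insert/current iterator bookkeeping). Pushing to a container you are iterating over invalidates its iterators, so each pass swaps the current promoted set out before walking it; when a pass promotes nothing, the fresh set stays empty and the loop ends:

  #include <vector>

  struct Object;
  void scan_object(Object* obj);      // assumed: may call promote() below

  std::vector<Object*> promoted;      // stand-in for the promoted set

  void promote(Object* obj) {         // called from inside scan_object()
    promoted.push_back(obj);
  }

  void scan_promoted() {
    while(!promoted.empty()) {
      std::vector<Object*> batch;
      batch.swap(promoted);           // rotate the set out; safe to walk
      for(Object* obj : batch) {
        scan_object(obj);             // may refill `promoted`
      }
    }
  }
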