Example #1
  // HACK todo test this!
  void MarkSweepGC::clean_weakrefs() {
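    // For each weakly-held container, replace fields whose targets were not
    // marked during this collection with nil, then discard the set.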
    if(!weak_refs) return;

    for(ObjectArray::iterator i = weak_refs->begin();
        i != weak_refs->end();
        i++) {
      // At the moment, only a Tuple can be marked weak.
      Tuple* tup = as<Tuple>(*i);
      for(size_t ti = 0; ti < tup->num_fields(); ti++) {
        Object* obj = tup->at(object_memory->state, ti);

        if(!obj->reference_p()) continue;

        if(obj->young_object_p()) {
          if(!obj->marked_p()) {
            tup->field[ti] = Qnil;
          }
        } else {
          Entry *entry = find_entry(obj);
          if(!entry->marked_p()) {
            tup->field[ti] = Qnil;
          }
        }
      }
    }

    delete weak_refs;
    weak_refs = NULL;
  }
Example #2
  void GarbageCollector::clean_weakrefs(bool check_forwards) {
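    // When check_forwards is set (during a young collection), a surviving
    // young referent is identified by its forwarding pointer and the ref is
    // updated to the new address; otherwise liveness comes from the mark bit.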
    if(!weak_refs_) return;

    for(ObjectArray::iterator i = weak_refs_->begin();
        i != weak_refs_->end();
        i++) {
      WeakRef* ref = try_as<WeakRef>(*i);
      if(!ref) continue; // Not a WeakRef for some reason; skip it.

      Object* obj = ref->object();
      if(!obj->reference_p()) continue;

      if(check_forwards) {
        if(obj->young_object_p()) {
          if(!obj->forwarded_p()) {
            ref->set_object(object_memory_, Qnil);
          } else {
            ref->set_object(object_memory_, obj->forward());
          }
        }
      } else if(!obj->marked_p(object_memory_->mark())) {
        ref->set_object(object_memory_, Qnil);
      }
    }

    delete weak_refs_;
    weak_refs_ = NULL;
  }
Example #3
  void GarbageCollector::clean_weakrefs(bool check_forwards) {
    if(!weak_refs_) return;

    for(ObjectArray::iterator i = weak_refs_->begin();
        i != weak_refs_->end();
        ++i) {
      if(!*i) continue; // Object was removed during young gc.
      WeakRef* ref = try_as<WeakRef>(*i);
      if(!ref) continue; // Other type for some reason?

      Object* obj = ref->object();
      if(!obj->reference_p()) continue;

      if(check_forwards) {
        if(obj->young_object_p()) {
          if(!obj->forwarded_p()) {
            ref->set_object(object_memory_, cNil);
          } else {
            ref->set_object(object_memory_, obj->forward());
          }
        }
      } else if(!obj->marked_p(object_memory_->mark())) {
        ref->set_object(object_memory_, cNil);
      }
    }

    delete weak_refs_;
    weak_refs_ = NULL;
  }
Example #4
 void GarbageCollector::clean_locked_objects(ManagedThread* thr, bool young_only) {
   LockedObjects& los = thr->locked_objects();
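   // Drop list entries whose objects did not survive the collection and
   // update entries whose young objects were forwarded to a new address.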
   for(LockedObjects::iterator i = los.begin();
       i != los.end();) {
     Object* obj = static_cast<Object*>(*i);
     if(young_only) {
       if(obj->young_object_p()) {
         if(obj->forwarded_p()) {
           *i = obj->forward();
           ++i;
         } else {
           i = los.erase(i);
         }
       } else {
         ++i;
       }
     } else {
       if(!obj->marked_p(object_memory_->mark())) {
         i = los.erase(i);
       } else {
         ++i;
       }
     }
   }
 }
Example #5
  inline void Object::write_barrier(VM* vm, void* ptr) {
    Object* obj = reinterpret_cast<Object*>(ptr);
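    // The barrier only needs to record a young object being stored into a
    // non-young holder; immediates, young holders, and non-young targets
    // all return early.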
    if(!obj->reference_p() ||
        this->young_object_p() ||
        !obj->young_object_p()) return;

    inline_write_barrier_passed(vm, ptr);
  }
Example #6
  void GlobalCache::prune_young() {
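    // After a young collection, follow forwarding pointers for any cache
    // entry fields that moved and clear entries whose young klass, module,
    // or method did not survive.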
    cache_entry* entry;
    for(size_t i = 0; i < CPU_CACHE_SIZE; i++) {
      entry = &entries[i];
      bool clear = false;

      Object* klass = reinterpret_cast<Object*>(entry->klass);
      if(!klass) continue;

      if(klass->young_object_p()) {
        if(klass->forwarded_p()) {
          Module* fwd = (Module*)klass->forward();
          entry->klass = fwd;
        } else {
          clear = true;
        }
      }

      Object* mod = reinterpret_cast<Object*>(entry->module);
      if(mod->young_object_p()) {
        if(mod->forwarded_p()) {
          entry->module = (Module*)mod->forward();
        } else {
          clear = true;
        }
      }

      Object* exec = reinterpret_cast<Object*>(entry->method);
      if(exec->young_object_p()) {
        if(exec->forwarded_p()) {
          entry->method = (Executable*)exec->forward();
        } else {
          clear = true;
        }
      }

      if(clear) {
        entry->klass = 0;
        entry->name = 0;
        entry->module = 0;
        entry->is_public = true;
        entry->method_missing = false;
      }
    }
  }
Example #7
  /**
   * Scans the remaining unscanned portion of the Next heap.
   */
  void BakerGC::copy_unscanned() {
    Object* iobj = next->next_unscanned(object_memory_->state());

    while(iobj) {
      assert(iobj->young_object_p());
      if(!iobj->forwarded_p()) scan_object(iobj);
      iobj = next->next_unscanned(object_memory_->state());
    }
  }
Example #8
  void ObjectMemory::prune_handles(capi::Handles* handles, bool check_forwards) {
    capi::Handle* handle = handles->front();
    capi::Handle* current;

    int total = 0;
    int count = 0;

    while(handle) {
      current = handle;
      handle = static_cast<capi::Handle*>(handle->next());

      Object* obj = current->object();
      total++;

      // Strong references will already have been updated.
      if(!current->weak_p()) {
        if(check_forwards) assert(!obj->forwarded_p());
        assert(obj->inflated_header()->object() == obj);
      } else if(check_forwards) {
        if(obj->young_object_p()) {

          // A weakref pointing to a valid young object
          //
          // TODO this only works because we run prune_handles right after
          // a collection. In this state, valid objects are only in current.
          if(young_->in_current_p(obj)) {
            continue;

          // A weakref pointing to a forwarded young object
          } else if(obj->forwarded_p()) {
            current->set_object(obj->forward());
            assert(current->object()->inflated_header_p());
            assert(current->object()->inflated_header()->object() == current->object());

          // A weakref pointing to a dead young object
          } else {
            count++;
            handles->remove(current);
            delete current;
          }
        }

      // A weakref pointing to a dead mature object
      } else if(!obj->marked_p(mark())) {
        count++;
        handles->remove(current);
        delete current;
      } else {
        assert(obj->inflated_header()->object() == obj);
      }
    }

    // std::cout << "Pruned " << count << " handles, " << total << "/" << handles->size() << " total.\n";
  }
Example #9
  void GlobalCache::prune_young() {
    for(size_t i = 0; i < CPU_CACHE_SIZE; i++) {
      CacheEntry* entry = &entries[i];
      bool clear = false;

      Object* klass = entry->klass;
      if(!klass) continue;

      if(klass->young_object_p()) {
        if(klass->forwarded_p()) {
          Module* fwd = force_as<Module>(klass->forward());
          entry->klass = fwd;
        } else {
          clear = true;
        }
      }

      Object* mod = entry->module;
      if(mod->young_object_p()) {
        if(mod->forwarded_p()) {
          entry->module = force_as<Module>(mod->forward());
        } else {
          clear = true;
        }
      }

      Object* exec = entry->method;
      if(exec->young_object_p()) {
        if(exec->forwarded_p()) {
          entry->method = force_as<Executable>(exec->forward());
        } else {
          clear = true;
        }
      }

      if(clear) {
        entry_names[i] = NULL;
        entry->clear();
      }
    }
  }
Example #10
 void BakerGC::update_mature_mark_stack() {
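   // The mature GC's mark stack may still reference young objects; rewrite
   // entries that were forwarded and null out entries whose objects died.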
   immix::MarkStack& stack = object_memory_->mature_mark_stack();
   for(immix::MarkStack::iterator i = stack.begin(); i != stack.end(); ++i) {
     Object* obj = (*i).as<Object>();
     if(obj && obj->young_object_p()) {
       if(obj->forwarded_p()) {
         *i = obj->forward();
       } else {
         *i = memory::Address::null();
       }
     }
   }
 }
Example #11
 memory::Address update_pointer(memory::Address addr) {
   Object* obj = addr.as<Object>();
   if(!obj) return memory::Address::null();
   if(obj->young_object_p()) {
     if(obj->forwarded_p()) return obj->forward();
     return memory::Address::null();
   } else {
     // we must remember this because it might
     // contain references to young gen objects
     object_memory_->remember_object(obj);
   }
   return addr;
 }
Example #12
void GarbageCollector::scan(RootBuffers& buffers, bool young_only) {
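    // Walk every object pointer held in the root buffers and replace it with
    // the result of saw_object (the object's possibly-moved address).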
    for(RootBuffers::Iterator i(buffers);
            i.more();
            i.advance())
    {
        Object** buffer = i->buffer();
        for(int idx = 0; idx < i->size(); idx++) {
            Object* tmp = buffer[idx];

            if(tmp->reference_p() && (!young_only || tmp->young_object_p())) {
                buffer[idx] = saw_object(tmp);
            }
        }
    }
}
Example #13
void GarbageCollector::scan(VariableRootBuffers& buffers, bool young_only) {
    for(VariableRootBuffers::Iterator vi(buffers);
            vi.more();
            vi.advance())
    {
        Object*** buffer = vi->buffer();
        for(int idx = 0; idx < vi->size(); idx++) {
            Object** var = buffer[idx];
            Object* tmp = *var;

            if(tmp->reference_p() && (!young_only || tmp->young_object_p())) {
                *var = saw_object(tmp);
            }
        }
    }
}
Example #14
  void BakerGC::update_mark_set() {
    // Update the marked set and remove young not forwarded objects
    ObjectArray* marked_set = object_memory_->marked_set();

    for(ObjectArray::iterator oi = marked_set->begin();
        oi != marked_set->end(); ++oi) {
      Object* obj = *oi;
      if(!obj) continue; // Already removed during previous cycle
      if(obj->young_object_p()) {
        if(obj->forwarded_p()) {
          *oi = obj->forward();
        } else {
          *oi = NULL;
        }
      }
    }
  }
Example #15
      bool mark_address(immix::Address addr, immix::MarkStack& ms) {
        Object* obj = addr.as<Object>();

        if(obj->marked_p()) {
          if(obj->marked_p(gc_->which_mark())) return false;
          assert(0 && "invalid mark detectet!\n");
        }
        obj->mark(gc_->which_mark());

        ms.push_back(addr);

        // If this is a young object, let the GC know not to try and mark
        // the block it's in.
        if(obj->young_object_p() || !obj->in_immix_p()) {
          return false;
        }
        return true;
      }
Example #16
  void GarbageCollector::scan(VariableRootBuffers& buffers,
                              bool young_only, AddressDisplacement* offset)
  {
    VariableRootBuffer* vrb = displace(buffers.front(), offset);

    while(vrb) {
      Object*** buffer = displace(vrb->buffer(), offset);
      for(int idx = 0; idx < vrb->size(); idx++) {
        Object** var = displace(buffer[idx], offset);
        Object* tmp = *var;

        if(tmp && tmp->reference_p() && (!young_only || tmp->young_object_p())) {
          *var = saw_object(tmp);
        }
      }

      vrb = displace((VariableRootBuffer*)vrb->next(), offset);
    }
  }
Example #17
  void BakerGC::update_weak_refs_set() {
    // Update the weakref set for mature GC and remove young not forwarded objects
    ObjectArray* weak_refs_set = object_memory_->weak_refs_set();

    if(weak_refs_set) {
      for(ObjectArray::iterator oi = weak_refs_set->begin();
          oi != weak_refs_set->end(); ++oi) {
        Object* obj = *oi;
        if(!obj) continue; // Already removed during previous cycle
        if(obj->young_object_p()) {
          if(obj->forwarded_p()) {
            *oi = obj->forward();
          } else {
            *oi = NULL;
          }
        }
      }
    }
  }
Example #18
  void BakerGC::walk_finalizers() {
    FinalizerHandler* fh = object_memory_->finalizer_handler();
    if(!fh) return;

    for(FinalizerHandler::iterator i = fh->begin();
        !i.end();
        /* advance is handled in the loop */)
    {
      FinalizeObject& fi = i.current();
      bool live = true;

      if(fi.object->young_object_p()) {
        live = fi.object->forwarded_p();
        if(Object* fwd = saw_object(fi.object)) {
          fi.object = fwd;
        }
      } else {
        // If this object is mature, scan it. This
        // means that any young objects it refers to are properly
        // GC'ed and kept alive if necessary
        scan_object(fi.object);
      }

      Object* fin = fi.ruby_finalizer;
      if(fin && fin->reference_p()) {
        if(fin->young_object_p()) {
          if(Object* fwd = saw_object(fin)) {
            fi.ruby_finalizer = fwd;
          }
        } else {
          // If this object is mature, scan it. This
          // means that any young objects it refers to are properly
          // GC'ed and kept alive if necessary
          scan_object(fin);
        }
      }

      i.next(live);
    }
  }
Example #19
  void test_collection() {
    std::map<int, Object*> objs;

    int index = 0;
    memory::Root* root = static_cast<memory::Root*>(state->globals().roots.head());
    while(root) {
      Object* tmp = root->get();
      if(tmp->reference_p() && tmp->young_object_p()) {
        objs[index] = tmp;
      }
      index++;

      root = static_cast<memory::Root*>(root->next());
    }

    //std::cout << "young: " << index << " (" <<
    //  state->om->young.total_objects << ")" << std::endl;

    memory::GCData gc_data(state->vm());
    state->memory()->collect_young(state, &gc_data);

    index = 0;
    root = static_cast<memory::Root*>(state->globals().roots.head());
    while(root) {
      if(Object* tmp = objs[index]) {
        TS_ASSERT(root->get() != tmp);
      }
      index++;

      root = static_cast<memory::Root*>(root->next());
    }

    memory::HeapDebug hd(state->memory());
    hd.walk(state->globals().roots);

    //std::cout << "total: " << hd.seen_objects << " (" <<
    //  state->om->young.total_objects << ")" << std::endl;
  }
Example #20
  /**
   * Perform garbage collection on the young objects.
   */
  void BakerGC::collect(GCData& data, YoungCollectStats* stats) {

#ifdef HAVE_VALGRIND_H
    VALGRIND_MAKE_MEM_DEFINED(next->start().as_int(), next->size());
    VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif

    Object* tmp;
    ObjectArray *current_rs = object_memory_->swap_remember_set();

    total_objects = 0;

    copy_spills_ = 0;
    reset_promoted();

    // Start by copying objects in the remember set
    for(ObjectArray::iterator oi = current_rs->begin();
        oi != current_rs->end();
        ++oi) {
      tmp = *oi;
      // unremember_object writes a NULL into the slot to remove an object,
      // so we don't have to compact the set in unremember
      if(tmp) {
        // assert(tmp->mature_object_p());
        // assert(!tmp->forwarded_p());

        // Remove the Remember bit, since we're clearing the set.
        tmp->clear_remember();
        scan_object(tmp);
      }
    }

    delete current_rs;

    for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
        wbi != object_memory_->aux_barriers().end();
        ++wbi) {
      gc::WriteBarrier* wb = *wbi;
      ObjectArray* rs = wb->swap_remember_set();
      for(ObjectArray::iterator oi = rs->begin();
          oi != rs->end();
          ++oi) {
        tmp = *oi;

        if(tmp) {
          tmp->clear_remember();
          scan_object(tmp);
        }
      }

      delete rs;
    }

    for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
      i->set(saw_object(i->get()));
    }

    if(data.threads()) {
      for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
          i != data.threads()->end();
          ++i) {
        scan(*i, true);
      }
    }

    for(Allocator<capi::Handle>::Iterator i(data.handles()->allocator()); i.more(); i.advance()) {
      if(!i->in_use_p()) continue;

      if(!i->weak_p() && i->object()->young_object_p()) {
        i->set_object(saw_object(i->object()));

      // Users manipulate values accessible from the data* within an
      // RData without running a write barrier. Thus, if we see a mature
      // RData, we must always scan it because it could contain
      // young pointers.
      } else if(!i->object()->young_object_p() && i->is_rdata()) {
        scan_object(i->object());
      }

      assert(i->object()->type_id() > InvalidType && i->object()->type_id() < LastObjectType);
    }

    std::list<capi::GlobalHandle*>* gh = data.global_handle_locations();

    if(gh) {
      for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
          i != gh->end();
          ++i) {
        capi::GlobalHandle* global_handle = *i;
        capi::Handle** loc = global_handle->handle();
        if(capi::Handle* hdl = *loc) {
          if(!REFERENCE_P(hdl)) continue;
          if(hdl->valid_p()) {
            Object* obj = hdl->object();
            if(obj && obj->reference_p() && obj->young_object_p()) {
              hdl->set_object(saw_object(obj));
            }
          } else {
            std::cerr << "Detected bad handle checking global capi handles\n";
          }
        }
      }
    }

#ifdef ENABLE_LLVM
    if(LLVMState* ls = data.llvm_state()) ls->gc_scan(this);
#endif

    // Handle all promotions to non-young space that occurred.
    handle_promotions();

    assert(fully_scanned_p());
    // We're now done seeing the entire object graph of normal, live references.
    // Now we get to handle the unusual references, like finalizers and such.

    // Objects with finalizers must be kept alive until the finalizers have
    // run.
    walk_finalizers();

    // Process possible promotions from processing objects with finalizers.
    handle_promotions();

    if(!promoted_stack_.empty()) rubinius::bug("promote stack has elements!");
    if(!fully_scanned_p()) rubinius::bug("more young refs");

    // Check any weakrefs and replace dead objects with nil
    clean_weakrefs(true);

    // Swap the 2 halves
    Heap *x = next;
    next = current;
    current = x;

    if(stats) {
      stats->lifetime = lifetime_;
      stats->percentage_used = current->percentage_used();
      stats->promoted_objects = promoted_objects_;
      stats->excess_objects = copy_spills_;
    }

    // Tune the age at which promotion occurs
    if(autotune_) {
      double used = current->percentage_used();
      if(used > cOverFullThreshold) {
        if(tune_threshold_ >= cOverFullTimes) {
          if(lifetime_ > cMinimumLifetime) lifetime_--;
        } else {
          tune_threshold_++;
        }
      } else if(used < cUnderFullThreshold) {
        if(tune_threshold_ <= cUnderFullTimes) {
          if(lifetime_ < cMaximumLifetime) lifetime_++;
        } else {
          tune_threshold_--;
        }
      } else if(tune_threshold_ > 0) {
        tune_threshold_--;
      } else if(tune_threshold_ < 0) {
        tune_threshold_++;
      } else if(tune_threshold_ == 0) {
        if(lifetime_ < original_lifetime_) {
          lifetime_++;
        } else if(lifetime_ > original_lifetime_) {
          lifetime_--;
        }
      }
    }

  }
Example #21
  void ImmixGC::collect(GCData& data) {
    Object* tmp;

    gc_.clear_lines();

    for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
      tmp = i->get();
      if(tmp->reference_p()) {
        saw_object(tmp);
      }
    }

    for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
      if(!i->weak_p()) saw_object(i->object());
    }

    for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
      if(!i->weak_p()) saw_object(i->object());
    }

    for(VariableRootBuffers::Iterator i(data.variable_buffers());
        i.more(); i.advance()) {
      Object*** buffer = i->buffer();
      for(int idx = 0; idx < i->size(); idx++) {
        Object** var = buffer[idx];
        Object* tmp = *var;

        if(tmp->reference_p() && tmp->young_object_p()) {
          saw_object(tmp);
        }
      }
    }

    // Walk all the call frames
    for(CallFrameLocationList::const_iterator i = data.call_frames().begin();
        i != data.call_frames().end();
        i++) {
      CallFrame** loc = *i;
      walk_call_frame(*loc);
    }

    gc_.process_mark_stack(allocator_);

    // Sweep up the garbage
    gc_.sweep_blocks();

    // This resets the allocator state to sync it up with the BlockAllocator
    // properly.
    allocator_.get_new_block();

    ObjectArray *current_rs = object_memory->remember_set;

    int cleared = 0;

    for(ObjectArray::iterator oi = current_rs->begin();
        oi != current_rs->end();
        oi++) {
      tmp = *oi;
      // unremember_object writes a NULL into the slot to remove an object,
      // so we don't have to compact the set in unremember
      if(tmp) {
        assert(tmp->zone == MatureObjectZone);
        assert(!tmp->forwarded_p());

        if(!tmp->marked_p()) {
          cleared++;
          *oi = NULL;
        }
      }
    }

    // Switch the which_mark_ for next time.
    which_mark_ = (which_mark_ == 1 ? 2 : 1);

#ifdef IMMIX_DEBUG
    std::cout << "Immix: RS size cleared: " << cleared << "\n";

    immix::Chunks& chunks = gc_.block_allocator().chunks();
    std::cout << "chunks=" << chunks.size() << "\n";

    immix::AllBlockIterator iter(chunks);

    int blocks_seen = 0;
    int total_objects = 0;
    int total_object_bytes = 0;

    while(immix::Block* block = iter.next()) {
      blocks_seen++;
      std::cout << "block " << block << ", holes=" << block->holes() << " "
                << "objects=" << block->objects() << " "
                << "object_bytes=" << block->object_bytes() << " "
                << "frag=" << block->fragmentation_ratio()
                << "\n";

      total_objects += block->objects();
      total_object_bytes += block->object_bytes();
    }

    std::cout << blocks_seen << " blocks\n";
    std::cout << gc_.bytes_allocated() << " bytes allocated\n";
    std::cout << total_object_bytes << " object bytes / " << total_objects << " objects\n";

    int* holes = new int[10];
    for(int i = 0; i < 10; i++) {
      holes[i] = 0;
    }

    immix::AllBlockIterator iter2(chunks);

    while(immix::Block* block = iter2.next()) {
      int h = block->holes();
      if(h > 9) h = 9;

      holes[h]++;
    }

    std::cout << "== hole stats ==\n";
    for(int i = 0; i < 10; i++) {
      if(holes[i] > 0) {
        std::cout << i << ": " << holes[i] << "\n";
      }
    }
#endif
  }
Example #22
void Handles::deallocate_handles(std::list<Handle*>* cached, int mark, BakerGC* young) {
    std::vector<bool> chunk_marks(allocator_->chunks_.size(), false);

    for(std::vector<int>::size_type i = 0; i < allocator_->chunks_.size(); ++i) {
        Handle* chunk = allocator_->chunks_[i];

        for(size_t j = 0; j < allocator_->cChunkSize; j++) {
            Handle* handle = &chunk[j];

            Object* obj = handle->object();

            if(!handle->in_use_p()) {
                continue;
            }

            // Strong references will already have been updated.
            if(!handle->weak_p()) {
                chunk_marks[i] = true;
                continue;
            }

            if(young) {
                if(obj->young_object_p()) {
                    // A weakref pointing to a valid young object
                    //
                    // TODO this only works because we run prune_handles right after
                    // a collection. In this state, valid objects are only in current.
                    if(young->in_current_p(obj)) {
                        chunk_marks[i] = true;
                        // A weakref pointing to a forwarded young object
                    } else if(obj->forwarded_p()) {
                        handle->set_object(obj->forward());
                        chunk_marks[i] = true;
                        // A weakref pointing to a dead young object
                    } else {
                        handle->clear();
                    }
                } else {
                    // Not a young object, so the young GC won't collect it;
                    // mark the chunk as still active
                    chunk_marks[i] = true;
                }

                // A weakref pointing to a dead mature object
            } else if(!obj->marked_p(mark)) {
                handle->clear();
            } else {
                chunk_marks[i] = true;
            }
        }
    }

    // Cleanup cached handles
    for(std::list<Handle*>::iterator it = cached->begin(); it != cached->end();) {
        Handle* handle = *it;

        if(handle->in_use_p()) {
            ++it;
        } else {
            it = cached->erase(it);
        }
    }

    allocator_->rebuild_freelist(&chunk_marks);
}
Example #23
  /**
   * Perform garbage collection on the young objects.
   */
  void BakerGC::collect(GCData* data, YoungCollectStats* stats) {

#ifdef HAVE_VALGRIND_H
    (void)VALGRIND_MAKE_MEM_DEFINED(next->start().as_int(), next->size());
    (void)VALGRIND_MAKE_MEM_DEFINED(current->start().as_int(), current->size());
#endif
    mprotect(next->start(), next->size(), PROT_READ | PROT_WRITE);
    mprotect(current->start(), current->size(), PROT_READ | PROT_WRITE);

    check_growth_start();

    ObjectArray *current_rs = object_memory_->swap_remember_set();

    total_objects = 0;

    copy_spills_ = 0;
    reset_promoted();

    // Start by copying objects in the remember set
    for(ObjectArray::iterator oi = current_rs->begin();
        oi != current_rs->end();
        ++oi) {
      Object* tmp = *oi;
      // unremember_object writes a NULL into the slot to remove an object,
      // so we don't have to compact the set in unremember
      if(tmp) {
        // Remove the Remember bit, since we're clearing the set.
        tmp->clear_remember();
        scan_object(tmp);
      }
    }

    delete current_rs;

    scan_mark_set();
    scan_mature_mark_stack();

    for(Roots::Iterator i(data->roots()); i.more(); i.advance()) {
      i->set(saw_object(i->get()));
    }

    if(data->threads()) {
      for(std::list<ManagedThread*>::iterator i = data->threads()->begin();
          i != data->threads()->end();
          ++i) {
        scan(*i, true);
      }
    }

    for(Allocator<capi::Handle>::Iterator i(data->handles()->allocator()); i.more(); i.advance()) {
      if(!i->in_use_p()) continue;

      if(!i->weak_p() && i->object()->young_object_p()) {
        i->set_object(saw_object(i->object()));

      // Users manipulate values accessible from the data* within an
      // RData without running a write barrier. Thus, if we see a mature
      // RData, we must always scan it because it could contain
      // young pointers.
      } else if(!i->object()->young_object_p() && i->is_rdata()) {
        scan_object(i->object());
      }
    }

    std::list<capi::GlobalHandle*>* gh = data->global_handle_locations();

    if(gh) {
      for(std::list<capi::GlobalHandle*>::iterator i = gh->begin();
          i != gh->end();
          ++i) {
        capi::GlobalHandle* global_handle = *i;
        capi::Handle** loc = global_handle->handle();
        if(capi::Handle* hdl = *loc) {
          if(!REFERENCE_P(hdl)) continue;
          if(hdl->valid_p()) {
            Object* obj = hdl->object();
            if(obj && obj->reference_p() && obj->young_object_p()) {
              hdl->set_object(saw_object(obj));
            }
          } else {
            std::cerr << "Detected bad handle checking global capi handles\n";
          }
        }
      }
    }

#ifdef ENABLE_LLVM
    if(LLVMState* ls = data->llvm_state()) ls->gc_scan(this);
#endif

    // Handle all promotions to non-young space that occurred.
    handle_promotions();

    assert(fully_scanned_p());
    // We're now done seeing the entire object graph of normal, live references.
    // Now we get to handle the unusual references, like finalizers and such.

    // Check any weakrefs and replace dead objects with nil
    // We need to do this before checking finalizers so people can't access
    // objects kept alive for finalization through weakrefs.
    clean_weakrefs(true);

    do {
      // Objects with finalizers must be kept alive until the finalizers have
      // run.
      walk_finalizers();
      // Scan any fibers that aren't running but still active
      scan_fibers(data, false);
      handle_promotions();
    } while(!promoted_stack_.empty() && !fully_scanned_p());

    // Remove unreachable locked objects still in the list
    if(data->threads()) {
      for(std::list<ManagedThread*>::iterator i = data->threads()->begin();
          i != data->threads()->end();
          ++i) {
        clean_locked_objects(*i, true);
      }
    }

    // Update the pending mark set to remove unreachable objects.
    update_mark_set();

    // Update the existing mark stack of the mature gen because young
    // objects might have moved.
    update_mature_mark_stack();

    // Update the weak ref set to remove unreachable weak refs.
    update_weak_refs_set();

    // Swap the 2 halves
    Heap* x = next;
    next = current;
    current = x;

    if(stats) {
      stats->lifetime = lifetime_;
      stats->percentage_used = current->percentage_used();
      stats->promoted_objects = promoted_objects_;
      stats->excess_objects = copy_spills_;
    }

    // Tune the age at which promotion occurs
    if(autotune_lifetime_) {
      double used = current->percentage_used();
      if(used > cOverFullThreshold) {
        if(tune_threshold_ >= cOverFullTimes) {
          if(lifetime_ > cMinimumLifetime) lifetime_--;
        } else {
          tune_threshold_++;
        }
      } else if(used < cUnderFullThreshold) {
        if(tune_threshold_ <= cUnderFullTimes) {
          if(lifetime_ < cMaximumLifetime) lifetime_++;
        } else {
          tune_threshold_--;
        }
      } else if(tune_threshold_ > 0) {
        tune_threshold_--;
      } else if(tune_threshold_ < 0) {
        tune_threshold_++;
      } else if(tune_threshold_ == 0) {
        if(lifetime_ < original_lifetime_) {
          lifetime_++;
        } else if(lifetime_ > original_lifetime_) {
          lifetime_--;
        }
      }
    }

  }
Example #24
  void ImmixGC::collect(GCData& data) {
    Object* tmp;

    gc_.clear_lines();

    int via_handles_ = 0;
    int via_roots = 0;
    int via_stack = 0;
    int callframes = 0;

    for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
      tmp = i->get();
      if(tmp->reference_p()) saw_object(tmp);
      via_roots++;
    }

    if(data.threads()) {
      for(std::list<ManagedThread*>::iterator i = data.threads()->begin();
          i != data.threads()->end();
          i++) {
        for(Roots::Iterator ri((*i)->roots()); ri.more(); ri.advance()) {
          ri->set(saw_object(ri->get()));
        }
      }
    }

    for(capi::Handles::Iterator i(*data.handles()); i.more(); i.advance()) {
      if(i->in_use_p() && !i->weak_p()) {
        saw_object(i->object());
        via_handles_++;
      }
    }

    for(capi::Handles::Iterator i(*data.cached_handles()); i.more(); i.advance()) {
      if(i->in_use_p() && !i->weak_p()) {
        saw_object(i->object());
        via_handles_++;
      }
    }

    std::list<capi::Handle**>* gh = data.global_handle_locations();

    if(gh) {
      for(std::list<capi::Handle**>::iterator i = gh->begin();
          i != gh->end();
          i++) {
        capi::Handle** loc = *i;
        if(capi::Handle* hdl = *loc) {
          if(!CAPI_REFERENCE_P(hdl)) continue;
          if(hdl->valid_p()) {
            Object* obj = hdl->object();
            if(obj && obj->reference_p()) {
              saw_object(obj);
              via_handles_++;
            }
          } else {
            std::cerr << "Detected bad handle checking global capi handles\n";
          }
        }
      }
    }

    for(VariableRootBuffers::Iterator i(data.variable_buffers());
        i.more(); i.advance()) {
      Object*** buffer = i->buffer();
      for(int idx = 0; idx < i->size(); idx++) {
        Object** var = buffer[idx];
        Object* tmp = *var;

        via_stack++;
        if(tmp->reference_p() && tmp->young_object_p()) {
          saw_object(tmp);
        }
      }
    }

    // Walk all the call frames
    for(CallFrameLocationList::const_iterator i = data.call_frames().begin();
        i != data.call_frames().end();
        i++) {
      callframes++;
      CallFrame** loc = *i;
      walk_call_frame(*loc);
    }

    gc_.process_mark_stack(allocator_);

    // We've now finished marking the entire object graph.

    check_finalize();

    // Finalize can cause more things to continue to live, so we must
    // check the mark_stack again.
    gc_.process_mark_stack(allocator_);

    // Sweep up the garbage
    gc_.sweep_blocks();

    // This resets the allocator state to sync it up with the BlockAllocator
    // properly.
    allocator_.get_new_block();

    ObjectArray *current_rs = object_memory_->remember_set();

    int cleared = 0;

    for(ObjectArray::iterator oi = current_rs->begin();
        oi != current_rs->end();
        oi++) {
      tmp = *oi;
      // unremember_object writes a NULL into the slot to remove an object,
      // so we don't have to compact the set in unremember
      if(tmp) {
        assert(tmp->zone() == MatureObjectZone);
        assert(!tmp->forwarded_p());

        if(!tmp->marked_p(object_memory_->mark())) {
          cleared++;
          *oi = NULL;
        }
      }
    }

    for(std::list<gc::WriteBarrier*>::iterator wbi = object_memory_->aux_barriers().begin();
        wbi != object_memory_->aux_barriers().end();
        wbi++) {
      gc::WriteBarrier* wb = *wbi;
      ObjectArray* rs = wb->remember_set();
      for(ObjectArray::iterator oi = rs->begin();
          oi != rs->end();
          oi++) {
        tmp = *oi;

        if(tmp) {
          assert(tmp->zone() == MatureObjectZone);
          assert(!tmp->forwarded_p());

          if(!tmp->marked_p(object_memory_->mark())) {
            cleared++;
            *oi = NULL;
          }
        }
      }
    }


    // Now, calculate how much space we're still using.
    immix::Chunks& chunks = gc_.block_allocator().chunks();
    immix::AllBlockIterator iter(chunks);

    int live_bytes = 0;
    int total_bytes = 0;

    while(immix::Block* block = iter.next()) {
      total_bytes += immix::cBlockSize;
      live_bytes += block->bytes_from_lines();
    }

    double percentage_live = (double)live_bytes / (double)total_bytes;

    if(object_memory_->state->shared.config.gc_immix_debug) {
      std::cerr << "[GC IMMIX: " << clear_marked_objects() << " marked"
                << ", "
                << via_roots << " roots "
                << via_handles_ << " handles "
                << (int)(percentage_live * 100) << "% live"
                << ", " << live_bytes << "/" << total_bytes
                << "]\n";
    }

    if(percentage_live >= 0.90) {
      if(object_memory_->state->shared.config.gc_immix_debug) {
        std::cerr << "[GC IMMIX: expanding. "
                   << (int)(percentage_live * 100)
                   << "%]\n";
      }
      gc_.block_allocator().add_chunk();
    }

#ifdef IMMIX_DEBUG
    std::cout << "Immix: RS size cleared: " << cleared << "\n";

    immix::Chunks& chunks = gc_.block_allocator().chunks();
    std::cout << "chunks=" << chunks.size() << "\n";

    immix::AllBlockIterator iter(chunks);

    int blocks_seen = 0;
    int total_objects = 0;
    int total_object_bytes = 0;

    while(immix::Block* block = iter.next()) {
      blocks_seen++;
      std::cout << "block " << block << ", holes=" << block->holes() << " "
                << "objects=" << block->objects() << " "
                << "object_bytes=" << block->object_bytes() << " "
                << "frag=" << block->fragmentation_ratio()
                << "\n";

      total_objects += block->objects();
      total_object_bytes += block->object_bytes();
    }

    std::cout << blocks_seen << " blocks\n";
    std::cout << gc_.bytes_allocated() << " bytes allocated\n";
    std::cout << total_object_bytes << " object bytes / " << total_objects << " objects\n";

    int* holes = new int[10];
    for(int i = 0; i < 10; i++) {
      holes[i] = 0;
    }

    immix::AllBlockIterator iter2(chunks);

    while(immix::Block* block = iter2.next()) {
      int h = block->holes();
      if(h > 9) h = 9;

      holes[h]++;
    }

    std::cout << "== hole stats ==\n";
    for(int i = 0; i < 10; i++) {
      if(holes[i] > 0) {
        std::cout << i << ": " << holes[i] << "\n";
      }
    }
#endif
  }
Example #25
  /* Perform garbage collection on the young objects. */
  void BakerGC::collect(GCData& data) {
#ifdef RBX_GC_STATS
    stats::GCStats::get()->bytes_copied.start();
    stats::GCStats::get()->objects_copied.start();
    stats::GCStats::get()->objects_promoted.start();
    stats::GCStats::get()->collect_young.start();
#endif

    Object* tmp;
    ObjectArray *current_rs = object_memory->remember_set;

    object_memory->remember_set = new ObjectArray(0);
    total_objects = 0;

    // Tracks all objects that we promoted during this run, so
    // we can scan them at the end.
    promoted_ = new ObjectArray(0);

    promoted_current = promoted_insert = promoted_->begin();

    for(ObjectArray::iterator oi = current_rs->begin();
        oi != current_rs->end();
        ++oi) {
      tmp = *oi;
      // unremember_object writes a NULL into the slot to remove an object,
      // so we don't have to compact the set in unremember
      if(tmp) {
        assert(tmp->zone == MatureObjectZone);
        assert(!tmp->forwarded_p());

        // Remove the Remember bit, since we're clearing the set.
        tmp->clear_remember();
        scan_object(tmp);
      }
    }

    delete current_rs;

    for(Roots::Iterator i(data.roots()); i.more(); i.advance()) {
      tmp = i->get();
      if(tmp->reference_p() && tmp->young_object_p()) {
        i->set(saw_object(tmp));
      }
    }

    for(VariableRootBuffers::Iterator i(data.variable_buffers());
        i.more(); i.advance()) {
      Object*** buffer = i->buffer();
      for(int idx = 0; idx < i->size(); idx++) {
        Object** var = buffer[idx];
        Object* tmp = *var;

        if(tmp->reference_p() && tmp->young_object_p()) {
          *var = saw_object(tmp);
        }
      }
    }

    // Walk all the call frames
    for(CallFrameLocationList::iterator i = data.call_frames().begin();
        i != data.call_frames().end();
        i++) {
      CallFrame** loc = *i;
      walk_call_frame(*loc);
    }

    /* Ok, now handle all promoted objects. This is set up a little
     * strangely, so it deserves an explanation.
     *
     * We want to scan each promoted object, but that scanning will likely
     * cause more objects to be promoted. Appending to an ObjectArray while
     * iterating over it invalidates the iterators, so instead we rotate the
     * current promoted set out as we iterate over it and swap in an
     * empty ObjectArray.
     *
     * This way, when there are no more objects being promoted, the last
     * ObjectArray will be empty.
     * */

    promoted_current = promoted_insert = promoted_->begin();

    while(promoted_->size() > 0 || !fully_scanned_p()) {
      if(promoted_->size() > 0) {
        for(;promoted_current != promoted_->end();
            ++promoted_current) {
          tmp = *promoted_current;
          assert(tmp->zone == MatureObjectZone);
          scan_object(tmp);
          if(watched_p(tmp)) {
            std::cout << "detected " << tmp << " during scan of promoted objects.\n";
          }
        }

        promoted_->resize(promoted_insert - promoted_->begin());
        promoted_current = promoted_insert = promoted_->begin();

      }

      /* As we're handling promoted objects, also handle unscanned objects.
       * Scanning these unscanned objects (via the scan pointer) will
       * cause more promotions. */
      copy_unscanned();
    }

    assert(promoted_->size() == 0);

    delete promoted_;
    promoted_ = NULL;

    assert(fully_scanned_p());

    /* Anything that is going to be found has been found by now, so we go
     * back and look at everything in current and call delete_object() on
     * anything that hasn't been forwarded. */
    find_lost_souls();

    /* Check any weakrefs and replace dead objects with nil */
    clean_weakrefs(true);

    /* Swap the 2 halves */
    Heap *x = next;
    next = current;
    current = x;
    next->reset();

#ifdef RBX_GC_STATS
    stats::GCStats::get()->collect_young.stop();
    stats::GCStats::get()->objects_copied.stop();
    stats::GCStats::get()->objects_promoted.stop();
    stats::GCStats::get()->bytes_copied.stop();
#endif
  }