Example #1
0
  // Allocates a "dirty" (fields not cleared) object of `size` bytes with
  // class `cls` and type `type`. Large requests go straight to the enduring
  // allocator; everything else is bump-allocated from the thread-local slab,
  // with a refill-and-retry slow path and a final fallback to the object
  // memory's general allocator.
  Object* VM::new_object_typed_dirty(Class* cls, size_t size, object_type type) {
    State state(this);

    // Requests above the large-object threshold bypass the slab entirely.
    if(unlikely(size > om->large_object_threshold)) {
      return om->new_object_typed_enduring_dirty(&state, cls, size, type);
    }

    // Fast path: carve the object out of the thread-local slab.
    Object* allocation = local_slab().allocate(size).as<Object>();

    if(unlikely(!allocation)) {
      // Slab exhausted: refill it and try exactly once more.
      if(shared.om->refill_slab(&state, local_slab())) {
        allocation = local_slab().allocate(size).as<Object>();
      }

      // Refill failed (or the retry did): delegate to the slower,
      // general-purpose allocator.
      if(!allocation) {
        return om->new_object_typed_dirty(&state, cls, size, type);
      }
    }

    allocation->init_header(cls, YoungObjectZone, type);

    return allocation;
  }
Example #2
0
  // Allocates a mature object with a payload of `obj_bytes` from the C heap,
  // records it in the mark/sweep entry list, and updates the allocation
  // accounting. Sets *collect_now when the collection budget is exhausted.
  // Returns NULL if the underlying malloc fails (previously a failed malloc
  // was dereferenced via header->entry).
  Object* MarkSweepGC::allocate(size_t obj_bytes, bool *collect_now) {
    size_t bytes;
    Object* obj;

    // Total allocation includes the bookkeeping Header in front of the body.
    bytes = sizeof(Header) + obj_bytes;

    Header *header = (Header*)malloc(bytes);

    // Report allocation failure to the caller instead of crashing below.
    if(unlikely(!header)) return NULL;

    Entry *entry = new Entry(header, bytes, obj_bytes);
    header->entry = entry;

    entries.push_back(entry);

    allocated_objects++;
    allocated_bytes += bytes;

    // Count down the budget; when it runs out, ask the caller to collect.
    next_collection_bytes -= bytes;
    if(next_collection_bytes < 0) {
      *collect_now = true;
      next_collection_bytes = MS_COLLECTION_BYTES;
    }

    obj = header->to_object();

    obj->init_header(MatureObjectZone, obj_bytes);

    return obj;
  }
Example #3
0
  // Allocates `bytes` for a large/mature object directly from the C heap,
  // tracks it in the entry list, updates the large-object metrics and the
  // collection countdown. Returns NULL when malloc fails.
  Object* MarkSweepGC::allocate(size_t bytes, bool *collect_now) {
    // Ask the C heap for the object's storage.
    void* mem = malloc(bytes);

    // Allocation failure is reported to the caller as NULL.
    if(unlikely(mem == NULL)) {
        return NULL;
    }

    Object* obj = reinterpret_cast<Object*>(mem);

    entries.push_back(obj);

    // Book-keeping: current and lifetime large-object counters.
    object_memory_->state()->metrics()->m.ruby_metrics.memory_large_objects++;
    object_memory_->state()->metrics()->m.ruby_metrics.memory_large_objects_total++;
    object_memory_->state()->metrics()->m.ruby_metrics.memory_large_bytes += bytes;
    object_memory_->state()->metrics()->m.ruby_metrics.memory_large_bytes_total += bytes;

    // Count down toward the next forced collection.
    next_collection_bytes -= bytes;
    if(next_collection_bytes < 0) {
      *collect_now = true;
      next_collection_bytes = collection_threshold;
    }

    obj->init_header(MatureObjectZone, InvalidType);

    return obj;
  }
Example #4
0
  // Allocates `bytes` for a mature object from the immix allocator and
  // initializes its header (type is filled in later by the caller).
  // Returns 0 for oversized requests or when the allocator has no space,
  // consistent with the uint32_t overload which already NULL-checks.
  Object* ImmixGC::allocate(int bytes) {
    // The immix space cannot hold objects above its per-object maximum.
    if(bytes > immix::cMaxObjectSize) return 0;

    Object* obj = allocator_.allocate(bytes).as<Object>();

    // Guard against allocator exhaustion instead of dereferencing NULL.
    if(likely(obj)) {
      obj->init_header(MatureObjectZone, InvalidType);
      obj->set_in_immix();
    }

    return obj;
  }
  // Mature allocation from the immix space. Oversized requests and allocator
  // exhaustion both yield 0/NULL; on success the header is initialized and
  // the object is tagged as residing in the immix space. `collect_now` is
  // forwarded to the allocator, which may set it.
  Object* ImmixGC::allocate(uint32_t bytes, bool& collect_now) {
    // Requests above the per-object maximum cannot be served here.
    if(bytes > cMaxObjectSize) return 0;

    Object* allocation = allocator_.allocate(bytes, collect_now).as<Object>();

    // Allocator came up empty: hand NULL back to the caller unchanged.
    if(!allocation) return allocation;

    allocation->init_header(MatureObjectZone, InvalidType);
    allocation->set_in_immix();

    return allocation;
  }
Example #6
0
  // Copies `orig` into freshly allocated space in this heap and returns the
  // copy. The copy's header is initialized young with the original's type
  // id, then the original's age and field contents are carried over.
  //
  // NOTE(review): the allocate() result is not NULL-checked before
  // init_header() — presumably callers guarantee sufficient space in the
  // target heap before copying; confirm.
  Object* Heap::copy_object(STATE, Object* orig) {
    size_t bytes = orig->size_in_bytes(state);
    Object* tmp = (Object*)allocate(bytes);
    tmp->init_header(YoungObjectZone, orig->type_id());

    // Carry over the object's age, then duplicate its body fields.
    tmp->initialize_copy(orig, orig->age());
    tmp->copy_body(state, orig);

#ifdef RBX_GC_STATS
    stats::GCStats::get()->objects_copied++;
    stats::GCStats::get()->bytes_copied += bytes;
#endif

    return tmp;
  }
Example #7
0
  // Allocates `bytes` for a mature object from the immix allocator,
  // initializes its header (type is filled in later by the caller) and
  // marks it as living in the immix space. Optionally records stats.
  //
  // NOTE(review): the allocator result is not NULL-checked before
  // init_header() — confirm allocator_.allocate() cannot fail here.
  Object* ImmixGC::allocate(int bytes) {
#ifdef RBX_GC_STATS
    stats::GCStats::get()->mature_bytes_allocated += bytes;
    stats::GCStats::get()->allocate_mature.start();
#endif

    Object* obj = allocator_.allocate(bytes).as<Object>();
    obj->init_header(MatureObjectZone, InvalidType);
    obj->set_in_immix();

#ifdef RBX_GC_STATS
    stats::GCStats::get()->allocate_mature.stop();
#endif

    return obj;
  }
Example #8
0
// Allocates a young object of `size` bytes with class `cls` and type `type`
// from the thread-local slab, clearing its fields. If the slab is exhausted
// it is refilled and the allocation retried once; on total failure the
// request is delegated to the object memory's general allocator.
Object* VM::new_object_typed(Class* cls, size_t size, object_type type) {
    Object* obj = reinterpret_cast<Object*>(local_slab().allocate(size));

    if(unlikely(!obj)) {
        // Slab exhausted: refill it and try exactly once more.
        if(shared.om->refill_slab(local_slab())) {
            obj = reinterpret_cast<Object*>(local_slab().allocate(size));
        }

        // If refill_slab fails, obj will still be NULL.

        if(!obj) {
            // Fall back to the slower, general-purpose allocator.
            return om->new_object_typed(cls, size, type);
        }
    }

    obj->init_header(cls, YoungObjectZone, type);
    obj->clear_fields(size);

    return obj;
}
Example #9
0
    /* Inline methods */
    Object* allocate(size_t bytes, bool *collect_now) {
      Object* obj;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->young_bytes_allocated += bytes;
      stats::GCStats::get()->allocate_young.start();
#endif

      if(!current->enough_space_p(bytes)) {
#if 0
        if(!next->enough_space_p(bytes)) {
          return NULL;
        } else {
          total_objects++;
          obj = (Object*)next->allocate(bytes);
        }
#endif
        *collect_now = true;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

        return NULL;
      } else {
        total_objects++;
        obj = (Object*)current->allocate(bytes);
      }

      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }

      obj->init_header(YoungObjectZone, bytes);

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

      return obj;
    }
Example #10
0
    /**
     * Attempts to allocate an object of the specified size from the Eden heap.
     *
     * If successful, the returned object's header is initialized to the young
     * generation. If the successful allocation crosses Eden's limit, the
     * limit_hit parameter is set to true.
     *
     * If there is insufficient space remaining, NULL is returned (limit_hit
     * is left untouched in that case).
     */
    Object* allocate(size_t bytes, bool* limit_hit) {
      Object* obj;

      if(!eden->enough_space_p(bytes)) {
        return NULL;
      } else {
        obj = eden->allocate(bytes).as<Object>();

        // Allocation succeeded but crossed Eden's soft limit: tell the
        // caller a collection should be scheduled soon.
        if(eden->over_limit_p(obj)) {
          *limit_hit = true;
        }
      }

#ifdef ENABLE_OBJECT_WATCH
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }
#endif

      obj->init_header(YoungObjectZone, InvalidType);

      return obj;
    }
Example #11
0
    // Attempts to allocate `bytes` from the eden space, returning the new
    // object with a young-generation header, or NULL when eden is full.
    //
    // Fix: the space check previously ran *outside* lock_, so two threads
    // could both pass enough_space_p() and then over-allocate from eden.
    // The check and the allocation now happen under the same lock.
    Object* allocate(size_t bytes) {
      Object* obj;

#ifdef RBX_GC_STATS
      stats::GCStats::get()->young_bytes_allocated += bytes;
      stats::GCStats::get()->allocate_young.start();
#endif

      lock_.lock();

      if(!eden.enough_space_p(bytes)) {
        lock_.unlock();
#ifdef RBX_GC_STATS
       stats::GCStats::get()->allocate_young.stop();
#endif
        return NULL;
      }

      total_objects++;
      obj = (Object*)eden.allocate(bytes);

      lock_.unlock();

#ifdef ENABLE_OBJECT_WATCH
      // Debugging aid: report allocation of specifically watched objects.
      if(watched_p(obj)) {
        std::cout << "detected " << obj << " during baker allocation.\n";
      }
#endif

      obj->init_header(YoungObjectZone, InvalidType);

#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_young.stop();
#endif

      return obj;
    }
Example #12
0
  // Allocates `bytes` for a mature object from the mark/sweep space,
  // records it in the entry list, and updates the allocation accounting.
  // Sets *collect_now when the collection budget is used up. Returns NULL
  // if the underlying allocation fails.
  //
  // Fix: the failure path previously skipped allocate_mature.stop(),
  // leaving the RBX_GC_STATS timer running.
  Object* MarkSweepGC::allocate(size_t bytes, bool *collect_now) {
    Object* obj;

#ifdef RBX_GC_STATS
    stats::GCStats::get()->mature_bytes_allocated += bytes;
    stats::GCStats::get()->allocate_mature.start();
#endif

    // Use dlmalloc when built with it, otherwise the system allocator.
#ifdef USE_DLMALLOC
    obj = reinterpret_cast<Object*>(malloc_.allocate(bytes));
#else
    obj = reinterpret_cast<Object*>(malloc(bytes));
#endif

    // If the allocation failed, we return a NULL pointer
    if(unlikely(!obj)) {
#ifdef RBX_GC_STATS
      stats::GCStats::get()->allocate_mature.stop();
#endif
      return NULL;
    }

    entries.push_back(obj);

    allocated_objects++;
    allocated_bytes += bytes;

    // Count down toward the next forced collection.
    next_collection_bytes -= bytes;
    if(next_collection_bytes < 0) {
      *collect_now = true;
      next_collection_bytes = collection_threshold;
    }

    obj->init_header(MatureObjectZone, InvalidType);

#ifdef RBX_GC_STATS
    stats::GCStats::get()->allocate_mature.stop();
#endif

    return obj;
  }