Example #1
struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;

  if (mrb->gc_threshold < mrb->live) {
    mrb_incremental_gc(mrb);
  }
  if (mrb->free_heaps == NULL) {
    add_heap(mrb);
  }

  p = mrb->free_heaps->freelist;
  mrb->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (mrb->free_heaps->freelist == NULL) {
    unlink_free_heap_page(mrb, mrb->free_heaps);
  }

  mrb->live++;
  if (mrb->arena_idx >= MRB_ARENA_SIZE) {
    /* arena overflow error */
    mrb_raise(mrb, E_TYPE_ERROR, "arena overflow error");
  }
  mrb->arena[mrb->arena_idx++] = p;
  memset(p, 0, sizeof(RVALUE));
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(mrb, p);
  return p;
}
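Every variant of mrb_obj_alloc() above pops one cell off an intrusive free list threaded through the unused slots of a heap page, then unlinks the page once its last free cell is handed out. A minimal sketch of that mechanism, using hypothetical simplified struct layouts (the real free_obj and heap-page types live in mruby's gc.c):

#include <stddef.h>

struct free_obj { struct free_obj *next; };  /* an unused cell, linked into a free list */
struct heap_page {
  struct free_obj *freelist;                 /* head of this page's free-cell list */
  struct heap_page *next;                    /* next page that still has free cells */
};

/* Pop one cell from the first page with free space; when the page becomes
 * full, drop it from the free-page list, mirroring unlink_free_heap_page()
 * in the examples. Assumes a page is available, as add_heap() guarantees. */
static void *pop_free_cell(struct heap_page **free_heaps) {
  struct heap_page *page = *free_heaps;
  struct free_obj *cell = page->freelist;
  page->freelist = cell->next;
  if (page->freelist == NULL)
    *free_heaps = page->next;
  return cell;
}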
Example #2
struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;

#ifdef MRB_GC_STRESS
  mrb_garbage_collect(mrb);
#endif
  if (mrb->gc_threshold < mrb->live) {
    mrb_incremental_gc(mrb);
  }
  if (mrb->free_heaps == NULL) {
    add_heap(mrb);
  }

  p = mrb->free_heaps->freelist;
  mrb->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (mrb->free_heaps->freelist == NULL) {
    unlink_free_heap_page(mrb, mrb->free_heaps);
  }

  mrb->live++;
  gc_protect(mrb, p);
  memset(p, 0, sizeof(RVALUE));
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(mrb, p);
  return p;
}
Example #3
struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (mrb->gc_threshold < mrb->live) {
    mrb_incremental_gc(mrb);
  }
  if (mrb->free_heaps == NULL) {
    add_heap(mrb);
  }

  p = mrb->free_heaps->freelist;
  mrb->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (mrb->free_heaps->freelist == NULL) {
    unlink_free_heap_page(mrb, mrb->free_heaps);
  }

  mrb->live++;
  gc_protect(mrb, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(mrb, p);
  return p;
}
Example #4
void
RL::Cluster::common_add (HeapPtr heap)
{
  add_heap (heap);

  // FIXME: here we should ask for presence for the heap...

  heap->trigger_saving.connect (boost::bind (&RL::Cluster::save, this));
}
Example #5
Avahi::Cluster::Cluster (Ekiga::ServiceCore &_core): core(_core)
{
  heap = HeapPtr(new Heap (core));

  add_heap (heap);

  /* don't check the cast: it has been checked already by avahi-main! */
  boost::shared_ptr<Ekiga::PresenceCore> presence_core = core.get<Ekiga::PresenceCore> ("presence-core");
  presence_core->add_presence_fetcher (heap);
}
Example #6
MRB_API struct RBasic*
mrb_obj_alloc(mrb_state *mrb, enum mrb_vtype ttype, struct RClass *cls)
{
  struct RBasic *p;
  static const RVALUE RVALUE_zero = { { { MRB_TT_FALSE } } };
  mrb_gc *gc = &mrb->gc;

  if (cls) {
    enum mrb_vtype tt;

    switch (cls->tt) {
    case MRB_TT_CLASS:
    case MRB_TT_SCLASS:
    case MRB_TT_MODULE:
    case MRB_TT_ENV:
      break;
    default:
      mrb_raise(mrb, E_TYPE_ERROR, "allocation failure");
    }
    tt = MRB_INSTANCE_TT(cls);
    if (tt != MRB_TT_FALSE &&
        ttype != MRB_TT_SCLASS &&
        ttype != MRB_TT_ICLASS &&
        ttype != MRB_TT_ENV &&
        ttype != tt) {
      mrb_raisef(mrb, E_TYPE_ERROR, "allocation failure of %S", mrb_obj_value(cls));
    }
  }

#ifdef MRB_GC_STRESS
  mrb_full_gc(mrb);
#endif
  if (gc->threshold < gc->live) {
    mrb_incremental_gc(mrb);
  }
  if (gc->free_heaps == NULL) {
    add_heap(mrb, gc);
  }

  p = gc->free_heaps->freelist;
  gc->free_heaps->freelist = ((struct free_obj*)p)->next;
  if (gc->free_heaps->freelist == NULL) {
    unlink_free_heap_page(gc, gc->free_heaps);
  }

  gc->live++;
  gc_protect(mrb, gc, p);
  *(RVALUE *)p = RVALUE_zero;
  p->tt = ttype;
  p->c = cls;
  paint_partial_white(gc, p);
  return p;
}
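mrb_obj_alloc() is the primitive that mruby's own constructors use to obtain a fresh object cell. A hedged usage sketch, assuming mruby's public headers and the string_class field of mrb_state (alloc_string is a hypothetical helper, not part of the mruby API):

#include <mruby.h>
#include <mruby/string.h>

/* Allocate an empty RString cell of the right class. The cast is safe
 * because every cell the allocator returns is one RVALUE wide, which the
 * examples above rely on when they zero the whole slot. */
static struct RString *alloc_string(mrb_state *mrb) {
  return (struct RString *)mrb_obj_alloc(mrb, MRB_TT_STRING, mrb->string_class);
}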
Example #7
void
mrb_init_heap(mrb_state *mrb)
{
  mrb->heaps = 0;
  mrb->free_heaps = 0;
  add_heap(mrb);
  mrb->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  mrb->gc_step_ratio = DEFAULT_GC_STEP_RATIO;

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
Example #8
Avahi::Cluster::Cluster (Ekiga::ServiceCore &_core): core(_core)
{
  Ekiga::PresenceCore* presence_core = NULL;

  heap = new Heap (core);

  add_heap (*heap);

  presence_core
    = dynamic_cast<Ekiga::PresenceCore*>(core.get ("presence-core"));
  /* don't check the dynamic cast: it has been checked already by avahi-main! */
  presence_core->add_presence_fetcher (*heap);
}
Example #9
	// assignment from another heap
	Ordered_Heap<T> & operator = (const Ordered_Heap<T> & h)
	{
		if (&h != this)
		{
			if (root)
			{
				root->delete_tree();
				delete root;
			}
			n_entries = 0;
			root = highest = lowest = 0;
			add_heap(h);
		}
		return *this;
	}
Example #10
SCM_EXPORT void
scm_prealloc_heaps(size_t n)
{
    size_t i;

    if (!n)
        n = l_n_heaps + 1;

    if (n > l_n_heaps_max)
        PLAIN_ERR("heap number ~ZU exceeded maximum number ~ZU",
                  n, l_n_heaps_max);

    for (i = l_n_heaps; i < n; i++)
        add_heap();
}
Example #11
void
mrb_init_heap(mrb_state *mrb)
{
  mrb->heaps = NULL;
  mrb->free_heaps = NULL;
  add_heap(mrb);
  mrb->gc_interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  mrb->gc_step_ratio = DEFAULT_GC_STEP_RATIO;
  mrb->is_generational_gc_mode = TRUE;
  mrb->gc_full = TRUE;

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
Example #12
void
test_incremental_sweep_phase(void)
{
  mrb_state *mrb = mrb_open();

  puts("test_incremental_sweep_phase");

  add_heap(mrb);
  mrb->sweeps = mrb->heaps;

  mrb_assert(mrb->heaps->next->next == NULL);
  mrb_assert(mrb->free_heaps->next->next == NULL);
  incremental_sweep_phase(mrb, MRB_HEAP_PAGE_SIZE*3);

  mrb_assert(mrb->heaps->next == NULL);
  mrb_assert(mrb->heaps == mrb->free_heaps);

  mrb_close(mrb);
}
Example #13
static void
gc_mark_and_sweep(void)
{
    size_t n_collected;

    SCM_BEGIN_GC_SUBCONTEXT();

    CDBG((SCM_DBG_GC, "[ gc start ]"));

    gc_mark();
    n_collected = gc_sweep();

    if (n_collected < l_heap_alloc_threshold) {
        CDBG((SCM_DBG_GC, "enough number of free cells cannot be collected. allocating new heap."));
        add_heap();
    }

    SCM_END_GC_SUBCONTEXT();
}
Example #14
void
mrb_gc_init(mrb_state *mrb, mrb_gc *gc)
{
#ifndef MRB_GC_FIXED_ARENA
  gc->arena = (struct RBasic**)mrb_malloc(mrb, sizeof(struct RBasic*)*MRB_GC_ARENA_SIZE);
  gc->arena_capa = MRB_GC_ARENA_SIZE;
#endif

  gc->current_white_part = GC_WHITE_A;
  gc->heaps = NULL;
  gc->free_heaps = NULL;
  add_heap(mrb, gc);
  gc->interval_ratio = DEFAULT_GC_INTERVAL_RATIO;
  gc->step_ratio = DEFAULT_GC_STEP_RATIO;
#ifndef MRB_GC_TURN_OFF_GENERATIONAL
  gc->generational = TRUE;
  gc->full = TRUE;
#endif

#ifdef GC_PROFILE
  program_invoke_time = gettimeofday_time();
#endif
}
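The arena that gc_protect() fills, and that Example #1 guards with an "arena overflow error", keeps freshly allocated objects reachable until the GC can see them from a root. C extensions that allocate many temporaries conventionally bracket each iteration with the arena save/restore pair; a sketch assuming mruby's public API:

#include <mruby.h>
#include <mruby/string.h>

void build_many(mrb_state *mrb) {
  for (int i = 0; i < 100000; i++) {
    int ai = mrb_gc_arena_save(mrb);            /* remember the arena index */
    mrb_value tmp = mrb_str_new_cstr(mrb, "t"); /* allocation pins tmp in the arena */
    (void)tmp;                                  /* ... use tmp here ... */
    mrb_gc_arena_restore(mrb, ai);              /* drop the arena reference each turn */
  }
}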
Example #15
int main()
{
    Heap *h;
    int num;
    char op;

    h = (Heap *) malloc(sizeof(Heap));

    init_heap(h);

    while (1) {
        printf("please input option:\n");
        scanf("%c", &op);
        FLUSH_STDIN;
        if (op == 'a') {
            printf("please input will add num:\n");
            scanf("%d", &num);
            FLUSH_STDIN;
            add_heap(h, num);

            PRT_HEAP(h);
        } else if (op == 'd') {
            delete_max_heap(h);

            PRT_HEAP(h);
        } else if (op == 'f') {
            printf("the max is %d\n", find_max_heap(h));
        } else {
            printf("exit program.\n");

            PRT_HEAP(h);
            break;
        }
    }

    free(h);
    return 0;
}
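The driver above never shows Heap or add_heap themselves. Given delete_max_heap() and find_max_heap(), one plausible shape is an array-backed binary max-heap with 1-based indexing; a hedged sketch in which the struct layout and capacity are assumptions:

#include <stddef.h>

#define HEAP_CAPACITY 1024

typedef struct Heap {
    int data[HEAP_CAPACITY + 1];  /* slot 0 unused: children of i are 2i and 2i+1 */
    size_t size;
} Heap;

/* Append the value, then sift it up until parent >= child holds again.
 * (Capacity checking is omitted for brevity.) */
void add_heap(Heap *h, int num)
{
    size_t i = ++h->size;
    h->data[i] = num;
    while (i > 1 && h->data[i / 2] < h->data[i]) {
        int tmp = h->data[i / 2];
        h->data[i / 2] = h->data[i];
        h->data[i] = tmp;
        i /= 2;
    }
}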
Example #16
	// add all elements of another heap (merge)
	void add_heap(const Ordered_Heap<T> & h)
	{
		if (h.root) add_heap(*h.root);
	}
	// heap with all the elements of another heap
	Ordered_Heap(const Ordered_Heap<T> & h) : lowest(0), highest(0), root(0), n_entries(0) { add_heap(h); }
	void add_heap(const Ordered_Heap_Member<T> & m)
	{
		add_entry(m.data);
		if (m.lower) add_heap(*m.lower);
		if (m.higher) add_heap(*m.higher);
	}
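Together these members give Ordered_Heap a pre-order merge: add_heap(const Ordered_Heap<T> &) starts at the source's root and re-inserts every element through add_entry(). A short usage sketch, assuming add_entry() is public on the same class:

void merge_demo()
{
	Ordered_Heap<int> a, b;
	a.add_entry(3);
	a.add_entry(1);
	b.add_entry(2);
	a.add_heap(b);  // pre-order copy of b's elements into a; b is unchanged
}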
Example #17
void CodeCache::initialize_heaps() {
  bool non_nmethod_set      = FLAG_IS_CMDLINE(NonNMethodCodeHeapSize);
  bool profiled_set         = FLAG_IS_CMDLINE(ProfiledCodeHeapSize);
  bool non_profiled_set     = FLAG_IS_CMDLINE(NonProfiledCodeHeapSize);
  size_t min_size           = os::vm_page_size();
  size_t cache_size         = ReservedCodeCacheSize;
  size_t non_nmethod_size   = NonNMethodCodeHeapSize;
  size_t profiled_size      = ProfiledCodeHeapSize;
  size_t non_profiled_size  = NonProfiledCodeHeapSize;
  // Check if total size set via command line flags exceeds the reserved size
  check_heap_sizes((non_nmethod_set  ? non_nmethod_size  : min_size),
                   (profiled_set     ? profiled_size     : min_size),
                   (non_profiled_set ? non_profiled_size : min_size),
                   cache_size,
                   non_nmethod_set && profiled_set && non_profiled_set);

  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Increase default non_nmethod_size to account for compiler buffers
  if (!non_nmethod_set) {
    non_nmethod_size += code_buffers_size;
  }
  // Calculate default CodeHeap sizes if not set by user
  if (!non_nmethod_set && !profiled_set && !non_profiled_set) {
    // Check if we have enough space for the non-nmethod code heap
    if (cache_size > non_nmethod_size) {
      // Use the default value for non_nmethod_size and one half of the
      // remaining size for non-profiled and one half for profiled methods
      size_t remaining_size = cache_size - non_nmethod_size;
      profiled_size = remaining_size / 2;
      non_profiled_size = remaining_size - profiled_size;
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      non_nmethod_size = cache_size - 2 * min_size;
      profiled_size = min_size;
      non_profiled_size = min_size;
    }
  } else if (!non_nmethod_set || !profiled_set || !non_profiled_set) {
    // The user explicitly set some code heap sizes. Increase or decrease the (default)
    // sizes of the other code heaps accordingly. First adapt non-profiled and profiled
    // code heap sizes and then only change non-nmethod code heap size if still necessary.
    intx diff_size = cache_size - (non_nmethod_size + profiled_size + non_profiled_size);
    if (non_profiled_set) {
      if (!profiled_set) {
        // Adapt size of profiled code heap
        if (diff_size < 0 && ((intx)profiled_size + diff_size) <= 0) {
          // Not enough space available, set to minimum size
          diff_size += profiled_size - min_size;
          profiled_size = min_size;
        } else {
          profiled_size += diff_size;
          diff_size = 0;
        }
      }
    } else if (profiled_set) {
      // Adapt size of non-profiled code heap
      if (diff_size < 0 && ((intx)non_profiled_size + diff_size) <= 0) {
        // Not enough space available, set to minimum size
        diff_size += non_profiled_size - min_size;
        non_profiled_size = min_size;
      } else {
        non_profiled_size += diff_size;
        diff_size = 0;
      }
    } else if (non_nmethod_set) {
      // Distribute remaining size between profiled and non-profiled code heaps
      diff_size = cache_size - non_nmethod_size;
      profiled_size = diff_size / 2;
      non_profiled_size = diff_size - profiled_size;
      diff_size = 0;
    }
    if (diff_size != 0) {
      // Use non-nmethod code heap for remaining space requirements
      assert(!non_nmethod_set && ((intx)non_nmethod_size + diff_size) > 0, "sanity");
      non_nmethod_size += diff_size;
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    non_profiled_size += profiled_size;
    profiled_size = 0;
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    non_nmethod_size += non_profiled_size;
    non_profiled_size = 0;
  }
  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (non_nmethod_size < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization(err_msg(
        "Not enough space in non-nmethod code heap to run VM: %zuK < %zuK",
        non_nmethod_size/K, (min_code_cache_size + code_buffers_size)/K));
  }

  // Verify sizes and update flag values
  assert(non_profiled_size + profiled_size + non_nmethod_size == cache_size, "Invalid code heap sizes");
  FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, non_nmethod_size);
  FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
  FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  non_nmethod_size = align_size_up(non_nmethod_size, alignment);
  profiled_size   = align_size_down(profiled_size, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(cache_size);
  ReservedSpace non_method_space    = rs.first_part(non_nmethod_size);
  ReservedSpace rest                = rs.last_part(non_nmethod_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}
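For the common case where none of the three code-heap flags is set on the command line, the version above reduces to simple arithmetic: keep the non-nmethod heap at its default (plus compiler buffers) and split the remainder evenly. A worked sketch with hypothetical sizes (real defaults vary by JDK build and flags):

#include <cstdio>
#include <cstddef>

int main() {
  std::size_t cache_size       = 240u * 1024 * 1024;   // assumed ReservedCodeCacheSize
  std::size_t non_nmethod_size = 8u * 1024 * 1024;     // assumed default + compiler buffers
  std::size_t remaining        = cache_size - non_nmethod_size;
  std::size_t profiled         = remaining / 2;        // one half for profiled nmethods
  std::size_t non_profiled     = remaining - profiled; // the rest for non-profiled nmethods
  std::printf("profiled=%zuK non-profiled=%zuK\n", profiled / 1024, non_profiled / 1024);
  return 0;
}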
Example #18
void CodeCache::initialize_heaps() {
  // Determine size of compiler buffers
  size_t code_buffers_size = 0;
#ifdef COMPILER1
  // C1 temporary code buffers (see Compiler::init_buffer_blob())
  const int c1_count = CompilationPolicy::policy()->compiler_count(CompLevel_simple);
  code_buffers_size += c1_count * Compiler::code_buffer_size();
#endif
#ifdef COMPILER2
  // C2 scratch buffers (see Compile::init_scratch_buffer_blob())
  const int c2_count = CompilationPolicy::policy()->compiler_count(CompLevel_full_optimization);
  // Initial size of constant table (this may be increased if a compiled method needs more space)
  code_buffers_size += c2_count * C2Compiler::initial_code_buffer_size();
#endif

  // Calculate default CodeHeap sizes if not set by user
  if (!FLAG_IS_CMDLINE(NonNMethodCodeHeapSize) && !FLAG_IS_CMDLINE(ProfiledCodeHeapSize)
      && !FLAG_IS_CMDLINE(NonProfiledCodeHeapSize)) {
    // Increase default NonNMethodCodeHeapSize to account for compiler buffers
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + code_buffers_size);

    // Check if we have enough space for the non-nmethod code heap
    if (ReservedCodeCacheSize > NonNMethodCodeHeapSize) {
      // Use the default value for NonNMethodCodeHeapSize and one half of the
      // remaining size for non-profiled methods and one half for profiled methods
      size_t remaining_size = ReservedCodeCacheSize - NonNMethodCodeHeapSize;
      size_t profiled_size = remaining_size / 2;
      size_t non_profiled_size = remaining_size - profiled_size;
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, profiled_size);
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, non_profiled_size);
    } else {
      // Use all space for the non-nmethod heap and set other heaps to minimal size
      FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, ReservedCodeCacheSize - os::vm_page_size() * 2);
      FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, os::vm_page_size());
      FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, os::vm_page_size());
    }
  }

  // We do not need the profiled CodeHeap, use all space for the non-profiled CodeHeap
  if (!heap_available(CodeBlobType::MethodProfiled)) {
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, NonProfiledCodeHeapSize + ProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, ProfiledCodeHeapSize, 0);
  }
  // We do not need the non-profiled CodeHeap, use all space for the non-nmethod CodeHeap
  if (!heap_available(CodeBlobType::MethodNonProfiled)) {
    FLAG_SET_ERGO(uintx, NonNMethodCodeHeapSize, NonNMethodCodeHeapSize + NonProfiledCodeHeapSize);
    FLAG_SET_ERGO(uintx, NonProfiledCodeHeapSize, 0);
  }

  // Make sure we have enough space for VM internal code
  uint min_code_cache_size = CodeCacheMinimumUseSpace DEBUG_ONLY(* 3);
  if (NonNMethodCodeHeapSize < (min_code_cache_size + code_buffers_size)) {
    vm_exit_during_initialization("Not enough space in non-nmethod code heap to run VM.");
  }
  guarantee(NonProfiledCodeHeapSize + ProfiledCodeHeapSize + NonNMethodCodeHeapSize <= ReservedCodeCacheSize, "Size check");

  // Align CodeHeaps
  size_t alignment = heap_alignment();
  size_t non_method_size = align_size_up(NonNMethodCodeHeapSize, alignment);
  size_t profiled_size   = align_size_down(ProfiledCodeHeapSize, alignment);

  // Reserve one continuous chunk of memory for CodeHeaps and split it into
  // parts for the individual heaps. The memory layout looks like this:
  // ---------- high -----------
  //    Non-profiled nmethods
  //      Profiled nmethods
  //         Non-nmethods
  // ---------- low ------------
  ReservedCodeSpace rs = reserve_heap_memory(ReservedCodeCacheSize);
  ReservedSpace non_method_space    = rs.first_part(non_method_size);
  ReservedSpace rest                = rs.last_part(non_method_size);
  ReservedSpace profiled_space      = rest.first_part(profiled_size);
  ReservedSpace non_profiled_space  = rest.last_part(profiled_size);

  // Non-nmethods (stubs, adapters, ...)
  add_heap(non_method_space, "CodeHeap 'non-nmethods'", CodeBlobType::NonNMethod);
  // Tier 2 and tier 3 (profiled) methods
  add_heap(profiled_space, "CodeHeap 'profiled nmethods'", CodeBlobType::MethodProfiled);
  // Tier 1 and tier 4 (non-profiled) methods and native methods
  add_heap(non_profiled_space, "CodeHeap 'non-profiled nmethods'", CodeBlobType::MethodNonProfiled);
}