Code example #1
File: inline_cache.cpp  Project: AlexIljin/factor
// The cache_entries parameter is empty (on cold call site) or has entries
// (on cache miss). Called from assembly with the actual return address.
// Compilation of the inline cache may trigger a GC, which may trigger a
// compaction; also, the block containing the return address may now be dead.
// Use a code_root to take care of the details.
// Allocates memory
cell factor_vm::inline_cache_miss(cell return_address_) {
  code_root return_address(return_address_, this);
  bool tail_call_site = tail_call_site_p(return_address.value);

#ifdef PIC_DEBUG
  FACTOR_PRINT("Inline cache miss at "
               << (tail_call_site ? "tail" : "non-tail")
               << " call site 0x" << std::hex << return_address.value
               << std::dec);
  print_callstack();
#endif

  data_root<array> cache_entries(ctx->pop(), this);
  fixnum index = untag_fixnum(ctx->pop());
  data_root<array> methods(ctx->pop(), this);
  data_root<word> generic_word(ctx->pop(), this);
  data_root<object> object(((cell*)ctx->datastack)[-index], this);

  cell pic_size = array_capacity(cache_entries.untagged()) / 2;

  update_pic_transitions(pic_size);

  cell xt = generic_word->entry_point;
  if (pic_size < max_pic_size) {
    cell klass = object_class(object.value());
    cell method = lookup_method(object.value(), methods.value());

    data_root<array> new_cache_entries(
        add_inline_cache_entry(cache_entries.value(), klass, method), this);

    inline_cache_jit jit(generic_word.value(), this);
    jit.emit_inline_cache(index, generic_word.value(), methods.value(),
                          new_cache_entries.value(), tail_call_site);
    code_block* code = jit.to_code_block(CODE_BLOCK_PIC, JIT_FRAME_SIZE);
    initialize_code_block(code);
    xt = code->entry_point();
  }

  // Install the new stub.
  if (return_address.valid) {
    // Since each PIC is only referenced from a single call site,
    // if the old call target was a PIC, we can deallocate it immediately,
    // instead of leaving dead PICs around until the next GC.
    deallocate_inline_cache(return_address.value);
    set_call_target(return_address.value, xt);

#ifdef PIC_DEBUG
    FACTOR_PRINT("Updated " << (tail_call_site ? "tail" : "non-tail")
                 << " call site 0x" << std::hex << return_address.value << std::dec
                 << " with 0x" << std::hex << (cell)xt << std::dec);
    print_callstack();
#endif
  }

  return xt;
}
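
Note: per the comments in examples #4 and #5, cache_entries is a flat array of class/method pairs, which is why pic_size above is array_capacity(...) / 2. Below is a minimal, hedged sketch of the linear scan the generated PIC stub performs at run time; it is plain C++, not Factor VM code, and the name pic_dispatch and the std::vector stand-in are illustrative only.

#include <cstddef>
#include <cstdint>
#include <vector>

using cell = std::uintptr_t;

// cache_entries = { class0, method0, class1, method1, ... }
// Returns the cached method entry point for `klass`, or `miss_entry` when the
// class is not cached (the real stub then calls the inline-cache-miss word).
cell pic_dispatch(const std::vector<cell>& cache_entries, cell klass,
                  cell miss_entry) {
  for (std::size_t i = 0; i + 1 < cache_entries.size(); i += 2) {
    if (cache_entries[i] == klass)
      return cache_entries[i + 1];
  }
  return miss_entry;
}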
Code example #2
File: inline_cache.cpp  Project: azteca/factor
/* The cache_entries parameter is either f (on cold call site) or an array (on cache miss).
Called from assembly with the actual return address */
void *factor_vm::inline_cache_miss(cell return_address)
{
	check_code_pointer(return_address);

	/* Since each PIC is only referenced from a single call site,
	   if the old call target was a PIC, we can deallocate it immediately,
	   instead of leaving dead PICs around until the next GC. */
	deallocate_inline_cache(return_address);

	gc_root<array> cache_entries(dpop(),this);
	fixnum index = untag_fixnum(dpop());
	gc_root<array> methods(dpop(),this);
	gc_root<word> generic_word(dpop(),this);
	gc_root<object> object(((cell *)ds)[-index],this);

	void *xt;

	cell pic_size = inline_cache_size(cache_entries.value());

	update_pic_transitions(pic_size);

	if(pic_size >= max_pic_size)
		xt = megamorphic_call_stub(generic_word.value());
	else
	{
		cell klass = object_class(object.value());
		cell method = lookup_method(object.value(),methods.value());

		gc_root<array> new_cache_entries(add_inline_cache_entry(
							   cache_entries.value(),
							   klass,
							   method),this);
		xt = compile_inline_cache(index,
					  generic_word.value(),
					  methods.value(),
					  new_cache_entries.value(),
					  tail_call_site_p(return_address))->xt();
	}

	/* Install the new stub. */
	set_call_target(return_address,xt);

#ifdef PIC_DEBUG
	printf("Updated %s call site 0x%lx with 0x%lx\n",
	       tail_call_site_p(return_address) ? "tail" : "non-tail",
	       return_address,
	       (cell)xt);
#endif

	return xt;
}
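
Note: both versions grow the PIC by one class/method pair per miss until pic_size reaches max_pic_size, at which point the call site is rewritten to a megamorphic stub (or, in example #1, falls back to the generic word's entry point). A hedged sketch of that call-site progression follows; the state names, the classify function, and the threshold value are illustrative, and the VM's actual max_pic_size may differ.

#include <cstddef>

enum class call_site_state { cold, monomorphic, polymorphic, megamorphic };

constexpr std::size_t kMaxPicSize = 3; // illustrative threshold only

// Classify a call site by the number of class/method pairs in its PIC.
call_site_state classify(std::size_t pic_size) {
  if (pic_size == 0) return call_site_state::cold;
  if (pic_size == 1) return call_site_state::monomorphic;
  if (pic_size <= kMaxPicSize) return call_site_state::polymorphic;
  return call_site_state::megamorphic;
}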
Code example #3
File: inline_cache.cpp  Project: azteca/factor
code_block *factor_vm::compile_inline_cache(fixnum index,cell generic_word_,cell methods_,cell cache_entries_,bool tail_call_p)
{
	gc_root<word> generic_word(generic_word_,this);
	gc_root<array> methods(methods_,this);
	gc_root<array> cache_entries(cache_entries_,this);

	inline_cache_jit jit(generic_word.value(),this);
	jit.compile_inline_cache(index,
				 generic_word.value(),
				 methods.value(),
				 cache_entries.value(),
				 tail_call_p);
	code_block *code = jit.to_code_block();
	relocate_code_block(code);
	return code;
}
Code example #4
File: inline_cache.cpp  Project: AlexIljin/factor
// index: 0 = top of stack, 1 = item underneath, etc
// cache_entries: array of class/method pairs
// Allocates memory
void inline_cache_jit::emit_inline_cache(fixnum index, cell generic_word_,
                                         cell methods_, cell cache_entries_,
                                         bool tail_call_p) {
  data_root<word> generic_word(generic_word_, parent);
  data_root<array> methods(methods_, parent);
  data_root<array> cache_entries(cache_entries_, parent);

  cell ic_type = determine_inline_cache_type(cache_entries.untagged());
  parent->update_pic_count(ic_type);

  // Generate machine code to determine the object's class.
  emit_with_literal(parent->special_objects[PIC_LOAD],
                    tag_fixnum(-index * sizeof(cell)));

  // Put the tag of the object, or the class of the tuple, in a register.
  emit(parent->special_objects[ic_type]);

  // Generate machine code to check, in turn, if the class is one of the cached
  // entries.
  for (cell i = 0; i < array_capacity(cache_entries.untagged()); i += 2) {
    cell klass = array_nth(cache_entries.untagged(), i);
    cell method = array_nth(cache_entries.untagged(), i + 1);

    emit_check_and_jump(ic_type, i, klass, method);
  }

  // If none of the above conditionals tested true, then execution "falls
  // through" to here.

  // A stack frame is set up, since the inline-cache-miss sub-primitive
  // makes a subroutine call to the VM.
  emit(parent->special_objects[JIT_PROLOG]);

  // The inline-cache-miss sub-primitive call receives enough information to
  // reconstruct the PIC with the new entry.
  push(generic_word.value());
  push(methods.value());
  push(tag_fixnum(index));
  push(cache_entries.value());

  emit_subprimitive(
      parent->special_objects[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD],
      true,  // tail_call_p
      true); // stack_frame_p
}
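
Note: a schematic restatement of the emission order in emit_inline_cache above, with each template block reduced to a symbolic step. The step labels refer to the special objects used in the source (PIC_LOAD, JIT_PROLOG, PIC_HIT, PIC_MISS_WORD / PIC_MISS_TAIL_WORD); the stub_step type and the sketch_emission function themselves are illustrative, not VM code.

#include <cstdint>
#include <string>
#include <utility>
#include <vector>

struct stub_step {
  std::string what;       // which template block is emitted
  std::intptr_t operand;  // literal baked into that block, if any
};

std::vector<stub_step> sketch_emission(
    std::intptr_t index,
    const std::vector<std::pair<std::intptr_t, std::intptr_t>>& entries,
    bool tail_call_p) {
  std::vector<stub_step> code;
  // PIC_LOAD: fetch the receiver from the data stack at the given depth.
  code.push_back({"PIC_LOAD", -index * (std::intptr_t)sizeof(void*)});
  // The template selected by ic_type: put the tag or tuple class in a register.
  code.push_back({"ic_type template (tag or tuple class)", 0});
  // One check-and-jump per cached class/method pair.
  for (const auto& e : entries) {
    code.push_back({"compare class", e.first});
    code.push_back({"PIC_HIT: jump to method", e.second});
  }
  // Fall-through: set up a frame and call the miss subprimitive.
  code.push_back({"JIT_PROLOG", 0});
  code.push_back({"push generic word, methods, index, cache entries", 0});
  code.push_back({tail_call_p ? "PIC_MISS_TAIL_WORD" : "PIC_MISS_WORD", 0});
  return code;
}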
Code example #5
File: inline_cache.cpp  Project: azteca/factor
/* index: 0 = top of stack, 1 = item underneath, etc
   cache_entries: array of class/method pairs */
void inline_cache_jit::compile_inline_cache(fixnum index,
					    cell generic_word_,
					    cell methods_,
					    cell cache_entries_,
					    bool tail_call_p)
{
	gc_root<word> generic_word(generic_word_,parent);
	gc_root<array> methods(methods_,parent);
	gc_root<array> cache_entries(cache_entries_,parent);

	cell inline_cache_type = parent->determine_inline_cache_type(cache_entries.untagged());
	parent->update_pic_count(inline_cache_type);

	/* Generate machine code to determine the object's class. */
	emit_class_lookup(index,inline_cache_type);

	/* Generate machine code to check, in turn, if the class is one of the cached entries. */
	cell i;
	for(i = 0; i < array_capacity(cache_entries.untagged()); i += 2)
	{
		/* Class equal? */
		cell klass = array_nth(cache_entries.untagged(),i);
		emit_check(klass);

		/* Yes? Jump to method */
		cell method = array_nth(cache_entries.untagged(),i + 1);
		emit_with(parent->userenv[PIC_HIT],method);
	}

	/* Generate machine code to handle a cache miss, which ultimately results in
	   this function being called again.

	   The inline-cache-miss primitive call receives enough information to
	   reconstruct the PIC. */
	push(generic_word.value());
	push(methods.value());
	push(tag_fixnum(index));
	push(cache_entries.value());
	word_special(parent->userenv[tail_call_p ? PIC_MISS_TAIL_WORD : PIC_MISS_WORD]);
}
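
Note: the four push calls at the end of compile_inline_cache here (and of emit_inline_cache in example #4) supply exactly the values that inline_cache_miss pops, in reverse order, because the data stack is LIFO. A tiny self-contained sketch of that correspondence; a std::vector stands in for the VM data stack and all values are dummies.

#include <cstdint>
#include <vector>

using cell = std::uintptr_t;

int main() {
  std::vector<cell> datastack;
  cell generic_word = 1, methods = 2, index = 3, cache_entries = 4;

  // Order pushed by the generated stub:
  datastack.push_back(generic_word);
  datastack.push_back(methods);
  datastack.push_back(index);
  datastack.push_back(cache_entries);

  // Order popped on entry to inline_cache_miss (examples #1 and #2):
  cell popped_cache_entries = datastack.back(); datastack.pop_back();
  cell popped_index         = datastack.back(); datastack.pop_back();
  cell popped_methods       = datastack.back(); datastack.pop_back();
  cell popped_generic_word  = datastack.back(); datastack.pop_back();

  (void)popped_cache_entries; (void)popped_index;
  (void)popped_methods; (void)popped_generic_word;
  return 0;
}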