Example no. 1
0
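// Context (assumed): this looks like HotSpot's SPARC compiled-IC code
// (compiledIC_sparc.cpp). The stub emitted here is the to-interpreter
// trampoline for a static call: it materializes a metadata placeholder in the
// inline-cache register and jumps to a placeholder address, both of which are
// patched later when the call is resolved.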
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // set (empty), G5
  // jmp -1

  if (mark == NULL) {
    mark = cbuf.insts_mark();  // Get mark within main instrs section.
  }

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size());
  if (base == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark));

  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

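  // SPARC delayed branch: the nop below fills the delay slot of the JUMP above.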
  __ delayed()->nop();

  assert(__ pc() - base <= to_interp_stub_size(), "wrong stub size");

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
  return base;
}
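// A second variant of the same emitter (apparently an older, COMPILER2-only
// form of the function above): it returns void, takes no explicit mark, and
// conservatively reserves twice the stub size when starting the stub.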
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
#ifdef COMPILER2
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // set (empty), G5
  // jmp -1

  address mark = cbuf.insts_mark();  // Get mark within main instrs section.

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size()*2);
  if (base == NULL) return;  // CodeBuffer::expand failed.

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark));

  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
#else
  ShouldNotReachHere();
#endif
}
Example no. 3
0
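/* Context (assumed): this appears to be make_int_expr from f2c's expr.c.
 * It walks an expression tree, recursively rewriting integer subexpressions;
 * OPWHATSIN presumably marks an argument/common address whose stored value
 * (rather than the address itself) is wanted. */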
expptr
#ifdef KR_headers
make_int_expr(e)
	expptr e;
#else
make_int_expr(expptr e)
#endif
{
    chainp listp;
    Addrp ap;
    expptr e1;

    if (e != ENULL)
	switch (e -> tag) {
	    case TADDR:
		if (e->addrblock.isarray) {
			if ((e1 = e->addrblock.memoffset))
				e->addrblock.memoffset = make_int_expr(e1);
			}
		else if (e->addrblock.vstg == STGARG
			|| (e->addrblock.vstg == STGCOMMON
				&& e->addrblock.uname_tag == UNAM_NAME
				&& e->addrblock.user.name->vcommequiv))
			e = mkexpr(OPWHATSIN, e, ENULL);
	        break;
	    case TEXPR:
	        e -> exprblock.leftp = make_int_expr (e -> exprblock.leftp);
	        e -> exprblock.rightp = make_int_expr (e -> exprblock.rightp);
	        break;
	    case TLIST:
		for(listp = e->listblock.listp; listp; listp = listp->nextp)
			if ((ap = (Addrp)listp->datap)
			 && ap->tag == TADDR
			 && ap->uname_tag == UNAM_CONST)
				addrlit(ap);
		break;
	    default:
	        break;
	} /* switch */

    return e;
} /* make_int_expr */
Example no. 4
0
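// Context (assumed): C1 (client compiler) patching stub for SPARC
// (c1_CodeStubs_sparc.cpp). The stub keeps a copy of the to-be-patched
// instructions, a 4-byte patch record describing them, and a call into the
// Runtime1 patching entry.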
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // copy original code here
  assert(NativeCall::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");
  assert((_bytes_to_copy & 0x3) == 0, "must copy a multiple of four bytes");

  Label call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load-klass instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit(NULL, oop_Relocation::spec(_oop_index));
    __ patchable_set(addrlit, _obj);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ a_byte(a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_klass_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // static field accesses have special semantics while the class
    // initializer is being run so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_oop_index >= 0, "must have oop index");
    __ ld_ptr(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc), G3);
    __ cmp(G2_thread, G3);
    __ br(Assembler::notEqual, false, Assembler::pn, call_patch);
    __ delayed()->nop();

    // load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ br(Assembler::always, false, Assembler::pt, _patch_site_continuation);
    __ delayed()->nop();

    // make sure this extra code gets skipped
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch.  We only need 3 bytes, but the record has to be
  // aligned as an instruction, so we emit 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // emit the offsets needed to find the code to patch
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record.  We need to emit a full word, so emit an extra empty byte
  __ a_byte(0);
  __ a_byte(being_initialized_entry_offset);
  __ a_byte(bytes_to_skip);
  __ a_byte(_bytes_to_copy);
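  // Patch record layout, as emitted above (one byte each):
  //   [0] 0 (padding, keeps the record instruction-aligned)
  //   [1] being_initialized_entry_offset
  //   [2] bytes_to_skip
  //   [3] _bytes_to_copy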
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
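  // The original instructions at _pc_start were copied into this stub above;
  // insert_unconditional now overwrites them with a jump here, so the first
  // execution of the patch site lands in this stub.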
  address target = NULL;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for(Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for(Runtime1::load_klass_patching_id); break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  __ call(target, relocInfo::runtime_call_type);
  __ delayed()->nop();
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ br(Assembler::always, false, Assembler::pt, _patch_site_entry);
  __ delayed()->nop();
  if (_id == load_klass_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, relocInfo::oop_type, relocInfo::none);

    pc = (address)(_pc_start + NativeMovConstReg::add_offset);
    RelocIterator iter2(cs, pc, pc+1);
    relocInfo::change_reloc_info_for_address(&iter2, (address) pc, relocInfo::oop_type, relocInfo::none);
  }

}
Example no. 5
0
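// Context (assumed): the s390 port of the same C1 patching stub
// (c1_CodeStubs_s390.cpp). Same scheme as the SPARC version above, with the
// patch record disguised as an 'A' (add) instruction for the disassembler.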
void PatchingStub::emit_code(LIR_Assembler* ce) {
  // Copy original code here.
  assert(NativeGeneralJump::instruction_size <= _bytes_to_copy && _bytes_to_copy <= 0xFF,
         "not enough room for call");

  NearLabel call_patch;

  int being_initialized_entry = __ offset();

  if (_id == load_klass_id) {
    // Produce a copy of the load-klass instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, metadata_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else if (_id == load_mirror_id || _id == load_appendix_id) {
    // Produce a copy of the load-mirror instruction for use by the being-initialized case.
#ifdef ASSERT
    address start = __ pc();
#endif
    AddressLiteral addrlit((intptr_t)0, oop_Relocation::spec(_index));
    __ load_const(_obj, addrlit);

#ifdef ASSERT
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      assert(a_byte == *start++, "should be the same code");
    }
#endif
  } else {
    // Make a copy of the code which is going to be patched.
    for (int i = 0; i < _bytes_to_copy; i++) {
      address ptr = (address)(_pc_start + i);
      int a_byte = (*ptr) & 0xFF;
      __ emit_int8 (a_byte);
    }
  }

  address end_of_patch = __ pc();
  int bytes_to_skip = 0;
  if (_id == load_mirror_id) {
    int offset = __ offset();
    if (CommentedAssembly) {
      __ block_comment(" being_initialized check");
    }

    // Static field accesses have special semantics while the class
    // initializer is being run, so we emit a test which can be used to
    // check that this code is being executed by the initializing
    // thread.
    assert(_obj != noreg, "must be a valid register");
    assert(_index >= 0, "must have oop index");
    __ z_lg(Z_R1_scratch, java_lang_Class::klass_offset_in_bytes(), _obj);
    __ z_cg(Z_thread, Address(Z_R1_scratch, InstanceKlass::init_thread_offset()));
    __ branch_optimized(Assembler::bcondNotEqual, call_patch);

    // Load_klass patches may execute the patched code before it's
    // copied back into place so we need to jump back into the main
    // code of the nmethod to continue execution.
    __ branch_optimized(Assembler::bcondAlways, _patch_site_continuation);

    // Make sure this extra code gets skipped.
    bytes_to_skip += __ offset() - offset;
  }

  // Now emit the patch record telling the runtime how to find the
  // pieces of the patch. We only need 3 bytes, but to help the disassembler
  // we make the data look like the following add instruction:
  //   A R1, D2(X2, B2)
  // which requires 4 bytes.
  int sizeof_patch_record = 4;
  bytes_to_skip += sizeof_patch_record;

  // Emit the offsets needed to find the code to patch.
  int being_initialized_entry_offset = __ offset() - being_initialized_entry + sizeof_patch_record;

  // Emit the patch record: opcode of the add followed by 3 bytes patch record data.
  __ emit_int8((int8_t)(A_ZOPC>>24));
  __ emit_int8(being_initialized_entry_offset);
  __ emit_int8(bytes_to_skip);
  __ emit_int8(_bytes_to_copy);
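  // Patch record layout, as emitted above (one byte each):
  //   [0] opcode byte of the A (add) instruction, for the disassembler
  //   [1] being_initialized_entry_offset
  //   [2] bytes_to_skip
  //   [3] _bytes_to_copy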
  address patch_info_pc = __ pc();
  assert(patch_info_pc - end_of_patch == bytes_to_skip, "incorrect patch info");

  address entry = __ pc();
  NativeGeneralJump::insert_unconditional((address)_pc_start, entry);
  address target = NULL;
  relocInfo::relocType reloc_type = relocInfo::none;
  switch (_id) {
    case access_field_id:  target = Runtime1::entry_for (Runtime1::access_field_patching_id); break;
    case load_klass_id:    target = Runtime1::entry_for (Runtime1::load_klass_patching_id); reloc_type = relocInfo::metadata_type; break;
    case load_mirror_id:   target = Runtime1::entry_for (Runtime1::load_mirror_patching_id); reloc_type = relocInfo::oop_type; break;
    case load_appendix_id: target = Runtime1::entry_for (Runtime1::load_appendix_patching_id); reloc_type = relocInfo::oop_type; break;
    default: ShouldNotReachHere();
  }
  __ bind(call_patch);

  if (CommentedAssembly) {
    __ block_comment("patch entry point");
  }
  // Cannot use call_c_opt() because its size is not constant.
  __ load_const(Z_R1_scratch, target); // Must not be optimized, in order to keep _patch_info_offset constant.
  __ z_basr(Z_R14, Z_R1_scratch);
  assert(_patch_info_offset == (patch_info_pc - __ pc()), "must not change");
  ce->add_call_info_here(_info);
  __ z_brcl(Assembler::bcondAlways, _patch_site_entry);
  if (_id == load_klass_id || _id == load_mirror_id || _id == load_appendix_id) {
    CodeSection* cs = __ code_section();
    address pc = (address)_pc_start;
    RelocIterator iter(cs, pc, pc + 1);
    relocInfo::change_reloc_info_for_address(&iter, (address) pc, reloc_type, relocInfo::none);
  }
}