void Relocation::pd_set_call_destination(address x) {
  assert(is_call(), "should be a call here");
  if (NativeCall::is_call_at(addr())) {
    address trampoline = nativeCall_at(addr())->get_trampoline();
    if (trampoline) {
      nativeCall_at(addr())->set_destination_mt_safe(x, /* assert_lock */false);
      return;
    }
  }
  MacroAssembler::pd_patch_instruction(addr(), x);
  assert(pd_call_destination(addr()) == x, "fail in reloc");
}
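The trampoline check above matters because a direct pc-relative call has limited reach; when the target lies outside that range, the call is routed through a trampoline stub that can itself be patched MT-safely. A minimal sketch of the range test, assuming a +/-128 MB pc-relative reach (the helper name and constant are illustrative, not HotSpot API):
#include <cstdint>

typedef unsigned char* address;

static bool fits_in_pcrel_range(address call_site, address target) {
  const intptr_t kMaxReach = 128 * 1024 * 1024;  // assumed +/-128 MB reach
  intptr_t disp = (intptr_t)(target - call_site);
  return -kMaxReach <= disp && disp < kMaxReach;
}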
Example #2
// MT-safe patching of a call instruction (and following word).
// First patches the second word, and then atomically replaces
// the first word with the first new instruction word.
// Other processors might briefly see the old first word
// followed by the new second word.  This is OK if the old
// second word is harmless, and the new second word may be
// harmlessly executed in the delay slot of the call.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching");
  assert(instr_addr != NULL, "illegal address for code patching");
  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  assert(NativeCall::instruction_size == 8, "wrong instruction size; must be 8");
  int i0 = ((int*)code_buffer)[0];
  int i1 = ((int*)code_buffer)[1];
  int* contention_addr = (int*) n_call->addr_at(1*BytesPerInstWord);
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
  // The set_long_at calls do the ICacheInvalidate so we just need to do them in reverse order
  n_call->set_long_at(1*BytesPerInstWord, i1);
  n_call->set_long_at(0*BytesPerInstWord, i0);
  // NOTE:  It is possible that another thread T will execute
  // only the second patched word.
  // In other words, since the original instruction is this
  //    call patching_stub; nop                   (NativeCall)
  // and the new sequence from the buffer is this:
  //    sethi %hi(K), %r; add %r, %lo(K), %r      (NativeMovConstReg)
  // what T will execute is this:
  //    call patching_stub; add %r, %lo(K), %r
  // thereby putting garbage into %r before calling the patching stub.
  // This is OK, because the patching stub ignores the value of %r.

  // Make sure the first-patched instruction, which may co-exist
  // briefly with the call, will do something harmless.
  assert(inv_op(*contention_addr) == Assembler::arith_op ||
         *contention_addr == nop_instruction(),
         "must not interfere with original call");
}
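The reverse-order stores above generalize to any two-word patch: publish the harmless second word first, then swap in the first word, so a concurrent reader never observes (new first word, old second word). A minimal sketch of the idea outside HotSpot, using C++11 atomics (illustrative only; the real code relies on set_long_at plus i-cache invalidation):
#include <atomic>
#include <cstdint>

void patch_two_words(std::atomic<uint32_t>* code, uint32_t w0, uint32_t w1) {
  code[1].store(w1, std::memory_order_release);  // new second word first
  code[0].store(w0, std::memory_order_release);  // then the first word
  // A racing thread sees (old, old), (old first, new second), or (new, new);
  // the mixed case must be harmless by construction, as the comment above
  // explains for the SPARC call/nop sequence.
}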
Example #3
CompiledIC::CompiledIC(Relocation* ic_reloc)
  : _ic_call(nativeCall_at(ic_reloc->addr())),
    _oops(parse_ic(ic_reloc->code(), ic_reloc->addr(), _oop_addr, &_is_optimized))
{
  assert(ic_reloc->type() == relocInfo::virtual_call_type ||
         ic_reloc->type() == relocInfo::opt_virtual_call_type, "wrong reloc. info");
}
Example #4
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
  address pc = (address) inst;
  if (inst->is_call()) {
    // NOTE: for call without a mov, the offset must fit a 32-bit immediate
    //       see also CompilerToVM.getMaxCallTargetOffset()
    NativeCall* call = nativeCall_at(pc);
    call->set_destination((address) foreign_call_destination);
    _instructions->relocate(call->instruction_address(), runtime_call_Relocation::spec(), Assembler::call32_operand);
  } else if (inst->is_mov_literal64()) {
    NativeMovConstReg* mov = nativeMovConstReg_at(pc);
    mov->set_data((intptr_t) foreign_call_destination);
    _instructions->relocate(mov->instruction_address(), runtime_call_Relocation::spec(), Assembler::imm_operand);
  } else if (inst->is_jump()) {
    NativeJump* jump = nativeJump_at(pc);
    jump->set_jump_destination((address) foreign_call_destination);
    _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec(), Assembler::call32_operand);
  } else if (inst->is_cond_jump()) {
    address old_dest = nativeGeneralJump_at(pc)->jump_destination();
    address disp = Assembler::locate_operand(pc, Assembler::call32_operand);
    *(jint*) disp += ((address) foreign_call_destination) - old_dest;
    _instructions->relocate(pc, runtime_call_Relocation::spec(), Assembler::call32_operand);
  } else {
    JVMCI_ERROR("unsupported relocation for foreign call");
  }

  TRACE_jvmci_3("relocating (foreign call)  at " PTR_FORMAT, p2i(inst));
}
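The rel32 caveat noted in the comment (and in CompilerToVM.getMaxCallTargetOffset()) is that an x86 call or jump with a 32-bit displacement can only reach targets within +/-2 GB of the end of the instruction. A hedged sketch of the representability test (names are illustrative):
#include <cstdint>

static bool fits_rel32(uintptr_t next_instruction_pc, uintptr_t dest) {
  intptr_t disp = (intptr_t)(dest - next_instruction_pc);
  return disp == (intptr_t)(int32_t)disp;  // survives truncation to 32 bits?
}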
Example #5
address Relocation::pd_call_destination(address orig_addr) {
  intptr_t adj = 0;
  address inst_loc = addr();

  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means its target will appear to have grown by addr() - orig_addr.
    adj = -(inst_loc - orig_addr);
  }
  if (NativeFarCall::is_far_call_at(inst_loc)) {
    NativeFarCall* call = nativeFarCall_at(inst_loc);
    return call->destination() + (intptr_t)(call->is_pcrelative() ? adj : 0);
  } else if (NativeJump::is_jump_at(inst_loc)) {
    NativeJump* jump = nativeJump_at(inst_loc);
    return jump->jump_destination() + (intptr_t)(jump->is_pcrelative() ? adj : 0);
  } else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
    NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
    return branch->branch_destination();
  } else {
    // There are two instructions at the beginning of a stub, therefore we
    // load at orig_addr + 8.
    orig_addr = nativeCall_at(inst_loc)->get_trampoline();
    if (orig_addr == NULL) {
      return (address) -1;
    } else {
      return (address) nativeMovConstReg_at(orig_addr + 8)->data();
    }
  }
}
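A worked example of the adjustment above, with illustrative addresses: suppose a pc-relative call at orig_addr = 0x1000 targets 0x5000 and is copied verbatim to addr() = 0x2000. Decoding at the new site yields 0x6000, so adj = -(0x2000 - 0x1000) = -0x1000, and 0x6000 + adj recovers the intended 0x5000. Absolute destinations (is_pcrelative() false) need no correction, which is why adj is applied conditionally.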
Example #6
void Relocation::pd_set_call_destination(address x) {
  NativeInstruction* ni = nativeInstruction_at(addr());
  if (ni->is_call()) {
    nativeCall_at(addr())->set_destination(x);
  } else if (ni->is_jump()) {
    NativeJump* nj = nativeJump_at(addr());

    // Unresolved jumps are recognized by a destination of -1.
    // However, 64-bit code can't actually encode such an address,
    // so an unresolved jump is emitted as a jump to self, and
    // jump_destination() returns -1 as the signal. We must not
    // relocate this jmp or the ic code will not see it as unresolved.

    if (nj->jump_destination() == (address) -1) {
      x = addr(); // jump to self
    }
    nj->set_jump_destination(x);
  } else if (ni->is_cond_jump()) {
    // %%%% kludge this, for now, until we get a jump_destination method
    address old_dest = nativeGeneralJump_at(addr())->jump_destination();
    address disp = Assembler::locate_operand(addr(), Assembler::call32_operand);
    *(jint*)disp += (x - old_dest);
  } else if (ni->is_mov_literal64()) {
    ((NativeMovConstReg*)ni)->set_data((intptr_t)x);
  } else {
    ShouldNotReachHere();
  }
}
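The -1 sentinel convention above can be isolated into a small hypothetical helper (not HotSpot API): an unresolved jump decodes to (address)-1 and is materialized as a jump-to-self on 64-bit, which is why x is redirected to addr() before patching.
typedef unsigned char* address;

// Hypothetical helper mirroring the convention above: keep unresolved jumps
// as jumps-to-self so IC code still sees them as unresolved after relocation.
static address resolved_or_self(address decoded_dest, address jump_addr,
                                address requested_dest) {
  return (decoded_dest == (address)-1) ? jump_addr : requested_dest;
}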
Example #7
// Release the CompiledICHolder* associated with this call site if there is one.
void CompiledIC::cleanup_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  if (is_icholder_entry(call->destination())) {
    NativeMovConstReg* value = nativeMovConstReg_at(call_site->cached_value());
    InlineCacheBuffer::queue_for_release((CompiledICHolder*)value->data());
  }
}
Example #8
void CodeInstaller::pd_relocate_JavaMethod(Handle hotspot_method, jint pc_offset, TRAPS) {
#ifdef ASSERT
  Method* method = NULL;
  // we need to check, this might also be an unresolved method
  if (hotspot_method->is_a(HotSpotResolvedJavaMethodImpl::klass())) {
    method = getMethodFromHotSpotMethod(hotspot_method());
  }
#endif
  switch (_next_call_type) {
    case INLINE_INVOKE:
      break;
    case INVOKEVIRTUAL:
    case INVOKEINTERFACE: {
      assert(method == NULL || !method->is_static(), "cannot call static method with invokeinterface");

      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
      call->set_destination(SharedRuntime::get_resolve_virtual_call_stub());
      _instructions->relocate(call->instruction_address(),
                              virtual_call_Relocation::spec(_invoke_mark_pc),
                              Assembler::call32_operand);
      break;
    }
    case INVOKESTATIC: {
      assert(method == NULL || method->is_static(), "cannot call non-static method with invokestatic");

      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
      call->set_destination(SharedRuntime::get_resolve_static_call_stub());
      _instructions->relocate(call->instruction_address(),
                              relocInfo::static_call_type, Assembler::call32_operand);
      break;
    }
    case INVOKESPECIAL: {
      assert(method == NULL || !method->is_static(), "cannot call static method with invokespecial");
      NativeCall* call = nativeCall_at(_instructions->start() + pc_offset);
      call->set_destination(SharedRuntime::get_resolve_opt_virtual_call_stub());
      _instructions->relocate(call->instruction_address(),
                              relocInfo::opt_virtual_call_type, Assembler::call32_operand);
      break;
    }
    default:
      JVMCI_ERROR("invalid _next_call_type value");
      break;
  }
}
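A condensed view of the dispatch above (stub names are taken from the code; the first-call behavior is the standard HotSpot pattern): each call is initially bound to a resolver stub, which looks up and patches in the real target on first execution.
//   INVOKEVIRTUAL, INVOKEINTERFACE -> SharedRuntime::get_resolve_virtual_call_stub()
//   INVOKESTATIC                   -> SharedRuntime::get_resolve_static_call_stub()
//   INVOKESPECIAL                  -> SharedRuntime::get_resolve_opt_virtual_call_stub()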
Example #9
CompiledIC::CompiledIC(RelocIterator* iter)
  : _ic_call(nativeCall_at(iter->addr()))
{
  address ic_call = _ic_call->instruction_address();

  nmethod* nm = iter->code();
  assert(ic_call != NULL, "ic_call address must be set");
  assert(nm != NULL, "must pass nmethod");
  assert(nm->contains(ic_call), "must be in nmethod");

  initialize_from_iter(iter);
}
Example #10
address Relocation::pd_call_destination() {
  NativeInstruction* ni = nativeInstruction_at(addr());
  if (ni->is_call())
    return nativeCall_at(addr())->destination();
#if 0
  else if (ni->is_jump())
    return nativeJump_at(addr())->jump_destination();
  else if (ni->is_cond_jump())
    return nativeGeneralJump_at(addr())->jump_destination();
  else
#endif  /* excise for now */
    { ShouldNotReachHere(); return NULL; }
}
Example #11
void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination, TRAPS) {
  address pc = (address) inst;
  if (inst->is_call()) {
    NativeCall* call = nativeCall_at(pc);
    call->set_destination((address) foreign_call_destination);
    _instructions->relocate(call->instruction_address(), runtime_call_Relocation::spec());
  } else if (inst->is_sethi()) {
    NativeJump* jump = nativeJump_at(pc);
    jump->set_jump_destination((address) foreign_call_destination);
    _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
  } else {
    JVMCI_ERROR("unknown call or jump instruction at " PTR_FORMAT, p2i(pc));
  }
  TRACE_jvmci_3("relocating (foreign call) at " PTR_FORMAT, p2i(inst));
}
Example #12
inline void CodeInstaller::pd_relocate_ForeignCall(NativeInstruction* inst, jlong foreign_call_destination) {
  address pc = (address) inst;
  if (inst->is_call()) {
    NativeCall* call = nativeCall_at(pc);
    call->set_destination((address) foreign_call_destination);
    _instructions->relocate(call->instruction_address(), runtime_call_Relocation::spec());
  } else if (inst->is_sethi()) {
    NativeJump* jump = nativeJump_at(pc);
    jump->set_jump_destination((address) foreign_call_destination);
    _instructions->relocate(jump->instruction_address(), runtime_call_Relocation::spec());
  } else {
    fatal(err_msg("unknown call or jump instruction at %p", pc));
  }
  TRACE_graal_3("relocating (foreign call) at %p", inst);
}
Example #13
void Relocation::pd_set_call_destination(address x) {
  if (NativeCall::is_call_at(addr())) {
    NativeCall* call = nativeCall_at(addr());
    call->set_destination(x);
    return;
  }
  if (NativeFarCall::is_call_at(addr())) {
    NativeFarCall* call = nativeFarCall_at(addr());
    call->set_destination(x);
    return;
  }
  // Special case:  Patchable branch local to the code cache.
  // This will break badly if the code cache grows larger than a few Mb.
  NativeGeneralJump* br = nativeGeneralJump_at(addr());
  br->set_jump_destination(x);
}
Example #14
void Relocation::pd_set_call_destination(address x, intptr_t off) {
  NativeInstruction* ni = nativeInstruction_at(addr());
  if (ni->is_call())
    nativeCall_at(addr())->set_destination(x);
#if 0 /* excise for now */
  else if (ni->is_jump())
    nativeJump_at(addr())->set_jump_destination(x);
  else if (ni->is_cond_jump()) {
    // %%%% kludge this, for now, until we get a jump_destination method
    address old_dest = nativeGeneralJump_at(addr())->jump_destination();
    address disp = Assembler::locate_operand(addr(), Assembler::call32_operand);
    *(jint*)disp += (x - old_dest);
  }
#endif  /* excise for now */
  else
    { ShouldNotReachHere(); }
}
Example #15
void Relocation::pd_set_call_destination(address x) {
  address inst_loc = addr();

  if (NativeFarCall::is_far_call_at(inst_loc)) {
    NativeFarCall* call = nativeFarCall_at(inst_loc);
    call->set_destination(x);
  } else if (NativeJump::is_jump_at(inst_loc)) {
    NativeJump* jump = nativeJump_at(inst_loc);
    jump->set_jump_destination(x);
  } else if (NativeConditionalFarBranch::is_conditional_far_branch_at(inst_loc)) {
    NativeConditionalFarBranch* branch = NativeConditionalFarBranch_at(inst_loc);
    branch->set_branch_destination(x);
  } else {
    NativeCall* call = nativeCall_at(inst_loc);
    call->set_destination_mt_safe(x, false);
  }
}
Example #16
address Relocation::pd_call_destination(address orig_addr) {
  assert(is_call(), "should be a call here");
  if (NativeCall::is_call_at(addr())) {
    address trampoline = nativeCall_at(addr())->get_trampoline();
    if (trampoline) {
      return nativeCallTrampolineStub_at(trampoline)->destination();
    }
  }
  if (orig_addr != NULL) {
    address new_addr = MacroAssembler::pd_call_destination(orig_addr);
    // If call is branch to self, don't try to relocate it, just leave it
    // as branch to self. This happens during code generation if the code
    // buffer expands. It will be relocated to the trampoline above once
    // code generation is complete.
    new_addr = (new_addr == orig_addr) ? addr() : new_addr;
    return new_addr;
  }
  return MacroAssembler::pd_call_destination(addr());
}
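The branch-to-self rule above can be expressed as a small hypothetical helper: while the CodeBuffer is still growing, a call that decodes as targeting its own old address must keep targeting itself at the new address.
typedef unsigned char* address;

// Hypothetical helper mirroring the self-branch rule above.
static address relocated_destination(address orig_addr, address new_addr,
                                     address decoded_dest) {
  return (decoded_dest == orig_addr) ? new_addr : decoded_dest;
}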
Example #17
void Relocation::pd_set_call_destination(address x) {
  address inst_addr = addr();

  if (NativeFarCall::is_far_call_at(inst_addr)) {
    if (!ShortenBranches) {
      if (MacroAssembler::is_call_far_pcrelative(inst_addr)) {
        address a1 = MacroAssembler::get_target_addr_pcrel(inst_addr+MacroAssembler::nop_size());
#ifdef ASSERT
        address a3 = nativeFarCall_at(inst_addr)->destination();
        if (a1 != a3) {
          unsigned int range = 128;
          Assembler::dump_code_range(tty, inst_addr, range, "pc-relative call w/o ShortenBranches?");
          assert(false, "pc-relative call w/o ShortenBranches?");
        }
#endif
        nativeFarCall_at(inst_addr)->set_destination(x, 0);
        return;
      }
      assert(x == (address)-1, "consistency check");
      return;
    }
    int toc_offset = -1;
    if (type() == relocInfo::runtime_call_w_cp_type) {
      toc_offset = ((runtime_call_w_cp_Relocation *)this)->get_constant_pool_offset();
    }
    if (toc_offset >= 0) {
      NativeFarCall* call = nativeFarCall_at(inst_addr);
      call->set_destination(x, toc_offset);
      return;
    }
  }

  if (NativeCall::is_call_at(inst_addr)) {
    NativeCall* call = nativeCall_at(inst_addr);
    if (call->is_pcrelative()) {
      call->set_destination_mt_safe(x);
      return;
    }
  }

  // constant is absolute, must use x
  nativeMovConstReg_at(inst_addr)->set_data(((intptr_t)x));
}
Example #18
address Relocation::pd_call_destination(address orig_addr) {
  address   inst_addr = addr();

  if (NativeFarCall::is_far_call_at(inst_addr)) {
    if (!ShortenBranches) {
      if (MacroAssembler::is_call_far_pcrelative(inst_addr)) {
        address a1 = MacroAssembler::get_target_addr_pcrel(orig_addr+MacroAssembler::nop_size());
#ifdef ASSERT
        address a2 = MacroAssembler::get_target_addr_pcrel(inst_addr+MacroAssembler::nop_size());
        address a3 = nativeFarCall_at(orig_addr)->destination();
        address a4 = nativeFarCall_at(inst_addr)->destination();
        if ((a1 != a3) || (a2 != a4)) {
          unsigned int range = 128;
          Assembler::dump_code_range(tty, inst_addr, range, "pc-relative call w/o ShortenBranches?");
          Assembler::dump_code_range(tty, orig_addr, range, "pc-relative call w/o ShortenBranches?");
          assert(false, "pc-relative call w/o ShortenBranches?");
        }
#endif
        return a1;
      }
      return (address)(-1);
    }
    NativeFarCall* call;
    if (orig_addr == NULL) {
      call = nativeFarCall_at(inst_addr);
    } else {
      // must access location (in CP) where destination is stored in unmoved code, because load from CP is pc-relative
      call = nativeFarCall_at(orig_addr);
    }
    return call->destination();
  }

  if (NativeCall::is_call_at(inst_addr)) {
    NativeCall* call = nativeCall_at(inst_addr);
    if (call->is_pcrelative()) {
      intptr_t off = inst_addr - orig_addr;
      return (address) (call->destination()-off);
    }
  }

  return (address) nativeMovConstReg_at(inst_addr)->data();
}
Example #19
address Relocation::pd_call_destination(address orig_addr) {
  intptr_t adj = 0;
  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means its target will appear to have grown by addr() - orig_addr.
    adj = -( addr() - orig_addr );
  }
  if (NativeCall::is_call_at(addr())) {
    NativeCall* call = nativeCall_at(addr());
    return call->destination() + adj;
  }
  if (NativeFarCall::is_call_at(addr())) {
    NativeFarCall* call = nativeFarCall_at(addr());
    return call->destination() + adj;
  }
  // Special case:  Patchable branch local to the code cache.
  // This will break badly if the code cache grows larger than a few Mb.
  NativeGeneralJump* br = nativeGeneralJump_at(addr());
  return br->jump_destination() + adj;
}
Example #20
address Relocation::pd_call_destination(address orig_addr) {
  intptr_t adj = 0;
  if (orig_addr != NULL) {
    // We just moved this call instruction from orig_addr to addr().
    // This means its target will appear to have grown by addr() - orig_addr.
    adj = -( addr() - orig_addr );
  }
  NativeInstruction* ni = nativeInstruction_at(addr());
  if (ni->is_call()) {
    return nativeCall_at(addr())->destination() + adj;
  } else if (ni->is_jump()) {
    return nativeJump_at(addr())->jump_destination() + adj;
  } else if (ni->is_cond_jump()) {
    return nativeGeneralJump_at(addr())->jump_destination() + adj;
  } else if (ni->is_mov_literal64()) {
    return (address) ((NativeMovConstReg*)ni)->data();
  } else {
    ShouldNotReachHere();
    return NULL;
  }
}
Example #21
// MT-safe patching of a call instruction.
// First patches the first word of the instruction to two jmp's that jump to
// themselves (a spinlock). Then patches the last byte, and then atomically
// replaces the jmp's with the first 4 bytes of the new instruction.
void NativeCall::replace_mt_safe(address instr_addr, address code_buffer) {
  assert(Patching_lock->is_locked() ||
         SafepointSynchronize::is_at_safepoint(), "concurrent code patching"); 
  assert(instr_addr != NULL, "illegal address for code patching");
#ifdef ASSERT
  NativeCall* n_call = nativeCall_at(instr_addr); // checking that it is a call
  if (os::is_MP()) {
    assert(((intx)instr_addr % BytesPerWord == 0), "must be aligned");
  }
#endif // ASSERT

  // Temporary code
  unsigned char patch[4];
  assert(sizeof(patch) == sizeof(jint), "sanity check");
  patch[0] = 0xEB;       // jmp rel8
  patch[1] = 0xFE;       // jmp to self
  patch[2] = 0xEB;
  patch[3] = 0xFE;

  // First patch dummy jmp in place
  *(jint*)instr_addr = *(jint*)patch;

  // Patch 4th byte
  address byte_4_adr = instr_addr + 4;
  *byte_4_adr = code_buffer[4];

  // Patch bytes 0-3
  *(jint*)instr_addr = *(jint*)code_buffer;

#ifdef ASSERT
  // verify patching, comparing bytewise
  for (int i = 0; i < NativeCall::instruction_size; i++) {
    int a_byte = code_buffer[i] & 0xFF;
    assert(*(instr_addr + i) == a_byte, "mt safe patching failed");
  }
#endif

  ICache::invalidate_range(instr_addr, NativeCall::instruction_size);
}
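The spinlock bytes above decode as follows: 0xEB is jmp rel8 and 0xFE is displacement -2, so each two-byte pair jumps back to its own first byte. Any thread that executes the site mid-patch spins harmlessly until the real instruction bytes are stored. A sketch of the encoding (illustrative):
// jmp rel8 with disp8 = -2: target = pc of next instruction - 2
//                                  = the start of this jmp itself.
static const unsigned char kSelfJump[2] = { 0xEB, 0xFE };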
Example #22
// Code for unit testing implementation of NativeCall class
void NativeCall::test() {
#ifdef ASSERT
  ResourceMark rm;
  CodeBuffer cb("test", 100, 100);
  MacroAssembler* a = new MacroAssembler(&cb);
  NativeCall  *nc;
  uint idx;
  int offsets[] = {
    0x0,
    0xfffffff0,
    0x7ffffff0,
    0x80000000,
    0x20,
    0x4000,
  };

  VM_Version::allow_all();

  a->call( a->pc(), relocInfo::none );
  a->delayed()->nop();
  nc = nativeCall_at( cb.insts_begin() );
  nc->print();

  nc = nativeCall_overwriting_at( nc->next_instruction_address() );
  for (idx = 0; idx < ARRAY_SIZE(offsets); idx++) {
    nc->set_destination( cb.insts_begin() + offsets[idx] );
    assert(nc->destination() == (cb.insts_begin() + offsets[idx]), "check unit test");
    nc->print();
  }

  nc = nativeCall_before( cb.insts_begin() + 8 );
  nc->print();

  VM_Version::revert();
#endif
}
Example #23
// --- build_repack_buffer ---------------------------------------------------
// Build an IFrame structure to help ASM code repack the 1 compiled frame into
// many interpreter (or C1) frames.  Takes in the current thread and a vframe;
// the vframe points at the virtual Java frame needing to be repacked.
// It takes in the callee (which this frame is busy trying to call in its
// inlined code), and an array of IFrames.  It returns the updated IFrame
// buffer filled in for this frame.
void Deoptimization::build_repack_buffer( JavaThread *thread, frame fr, IFrame *buf, const DebugMap *dm, const DebugScope *ds, intptr_t *jexstk, objectRef *lckstk, bool is_deopt, bool is_c1, bool is_youngest) {
  assert( thread->_deopt_buffer->contains((char*)(buf+1)), "over-ran large deopt buffer?" );

  int bci = ds->bci();
  if (bci == InvocationEntryBci) {
    // We deoptimized while hanging in prologue code for a synchronized
    // method.  We got the lock (after all, deopt happens after returning
    // from the blocking call).  We want to begin execution in the
    // interpreter at BCI 0, and after taking the lock.
    // Also it is possible to enter the deopt code through the br_s on method
    // entry before the first byte code.
    bci = 0;
  }

  const methodOop moop = ds->method().as_methodOop();
  if( ds->caller() ) {          // Do I have a caller?  Am I mid-call?
    // Initialize the constant pool entry for caller-parameter size.  It
    // might be the case that we inlined and compiled a callee, and are busy
    // calling it in the compiled code, and get deoptimized with that callee
    // in-progress AND we've never executed it in the interpreter - which
    // would have filled in the constant pool cache before making the call.
    // Fill it in now.
    const methodOop caller = ds->caller()->method().as_methodOop();
    int index = Bytes::get_native_u2(caller->bcp_from(ds->caller()->bci())+1);
    ConstantPoolCacheEntry *cpe = caller->constants()->cache()->entry_at(index);
    // Since we are setting the constant pool entry here, and another thread
    // could be busy resolving here we have a race condition setting the
    // flags.  Use a CAS to only set the flags if they are currently 0.
    intx *flags_adr = (intx*)((intptr_t)cpe + in_bytes(ConstantPoolCacheEntry::flags_offset()));
    if( !*flags_adr ) {         // Flags currently 0?
      // Set the flags, because the interpreter-return-entry points need some
      // info from them.  Not all fields are set, because it's too complex to
      // do it here... and not needed.  The cpCacheEntry is left "unresolved"
      // such that the next real use of it from the interpreter will be forced
      // to do a proper resolve, which will fill in the missing fields.

      // Compute new flags needed by the interpreter-return-entry
      intx flags = 
        (moop->size_of_parameters() & 0xFF) | 
        (1 << ConstantPoolCacheEntry::hotSwapBit) |
        (moop->result_type() << ConstantPoolCacheEntry::tosBits);
      // CAS 'em in, but only if there is currently a 0 flags
      assert0( sizeof(jlong)==sizeof(intx) );
      Atomic::cmpxchg((jlong)flags, (jlong*)flags_adr, 0);
      // We don't care about the result, because the cache is monomorphic.
      // Either our CAS succeeded and jammed    the right parameter count, or
      // another thread succeeded and jammed in the right parameter count.
    } 
  }

  if (TraceDeoptimization) {
    BufferedLoggerMark m(NOTAG, Log::M_DEOPT, TraceDeoptimization, true);
    m.out("DEOPT REPACK c%d: ", is_c1 ? 1 : 2);
    moop->print_short_name(m.stream());
    m.out(" @ bci %d %s", bci, ds->caller() ? "called by...": "   (oldest frame)" );
  }

  // If there was a suitable C1 frame, use it.
  // Otherwise, use an interpreter frame.
  if( 1 ) {
    // Build an interpreter-style IFrame.  Naked oops abound.
    assert0( !objectRef(moop).is_stack() );
    buf->_mref = objectRef(moop);
    buf->_cpc = moop->constants()->cacheRef();

    // Compute monitor list length.  If we have coarsened a lock we will end
    // up unlocking it and the repack buffer will not need to see it.
    uint mons_len = ds->numlocks();
    if( ds->is_extra_lock() ) { mons_len--; assert0( mons_len >= 0 ); }
    assert0( mons_len < (256*sizeof(buf->_numlck)) );
    buf->_numlck = mons_len;
    
    // Set up the return pc for the next frame: the next frame is a younger
    // frame which will return to this older frame.  All middle frames return
    // back into the interpreter, just after a call with proper TOS state.
    // Youngest frames always start in vtos state because the uncommon-trap
    // blob sets them up that way.
    const address bcp = moop->bcp_from(bci);
    Bytecodes::Code c = Bytecodes::java_code(Bytecodes::cast(*bcp));
    BasicType return_type = T_VOID;

    bool handle_popframe = is_youngest && JvmtiExport::can_pop_frame() && thread->popframe_forcing_deopt_reexecution();

    int bci_bump = 0;
    if( !is_youngest ) {        // Middle-frame?
      bool from_call = (c == Bytecodes::_invokevirtual ||
                        c == Bytecodes::_invokespecial ||
                        c == Bytecodes::_invokestatic ||
                        c == Bytecodes::_invokeinterface);
      assert(from_call, "Middle frame is in the middle of a call");
      bci_bump = Bytecodes::length_at(bcp); // But need to know how much it will be bumped for the return address
      buf->_bci = bci;          // Save bci without bumping it; normal interpreter call returns bump the bci as needed
      buf[-1]._retadr = Interpreter::return_entry(vtos, bci_bump);

    } else if( thread->pending_exception() ) { 
      // Deopt-with-pending.  Throw up on return to interpreter, which is
      // handled by unpack_and_go.
      buf->_bci = bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();

    } else if( !is_deopt ) {    // It is a C2-style uncommon-trap.
      // Do NOT increment the BCP!  We are re-executing the current bytecode.
      buf->_bci = bci;
      buf[-1]._retadr = Interpreter::unpack_and_go();
      
    } else {                    // It is a plain deopt
      // It is a deopt without exception.  See if we are C1 in mid-patch.
      // If so, we always need to re-execute the bytecode.
      bool is_C1_mid_patch = false;
      if( is_c1 ) {             // C1 codeblob?
        address caller_pc = fr.pc();
        if (NativeCall::is_call_before(caller_pc)) {
          address target = nativeCall_at(caller_pc)->destination();
          is_C1_mid_patch = target == Runtime1::entry_for(Runtime1::load_klass_patching_id);
        }
      }
      if( is_C1_mid_patch ) {
        Untested("");
        // Do NOT increment the BCP!  We are re-executing the current bytecode.
      } else if( ds->bci() == InvocationEntryBci ) {
        // It is deopt while hanging on a method-entry lock.
        // Do not advance BCP, as we have not executed bci 0 yet.
        
      } else {                  // Else C2 or C1-not-mid-patch
        // It is a deopt.  Whether we re-execute the current bytecode or
        // assume it has completed depends on the bytecode.
        switch( c ) {
        case Bytecodes::_lookupswitch:
        case Bytecodes::_tableswitch:
        case Bytecodes::_fast_binaryswitch:
        case Bytecodes::_fast_linearswitch:
          // recompute conditional expression folded into _if<cond>
        case Bytecodes::_lcmp      :
        case Bytecodes::_fcmpl     :
        case Bytecodes::_fcmpg     :
        case Bytecodes::_dcmpl     :
        case Bytecodes::_dcmpg     :
        case Bytecodes::_ifnull    :
        case Bytecodes::_ifnonnull :
        case Bytecodes::_goto      :
        case Bytecodes::_goto_w    :
        case Bytecodes::_ifeq      :
        case Bytecodes::_ifne      :
        case Bytecodes::_iflt      :
        case Bytecodes::_ifge      :
        case Bytecodes::_ifgt      :
        case Bytecodes::_ifle      :
        case Bytecodes::_if_icmpeq :
        case Bytecodes::_if_icmpne :
        case Bytecodes::_if_icmplt :
        case Bytecodes::_if_icmpge :
        case Bytecodes::_if_icmpgt :
        case Bytecodes::_if_icmple :
        case Bytecodes::_if_acmpeq :
        case Bytecodes::_if_acmpne :
          // special cases
        case Bytecodes::_aastore:
          // We are re-executing the current bytecode.
          Untested("");
          break;
          // special cases
        case Bytecodes::_putstatic:
        case Bytecodes::_getstatic:
        case Bytecodes::_getfield:
        case Bytecodes::_putfield:
          // We are re-executing the current bytecode.
          break;
        case Bytecodes::_athrow    :
          break;                // Must be deopt-w-exception
        case Bytecodes::_invokevirtual:
        case Bytecodes::_invokespecial:
        case Bytecodes::_invokestatic: {
          methodHandle mh(thread, moop);
          return_type = Bytecode_invoke_at(mh, bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_invokeinterface: {
          methodHandle mh(thread, moop);
          return_type = Bytecode_invoke_at(mh, bci)->result_type(thread);
          if( !handle_popframe &&
              !ds->should_reexecute()) 
            bci_bump = 5; // Increment the BCP to post-call!!!  See below!
          break;
        }
        case Bytecodes::_ldc   : 
          Untested("");
          return_type = constant_pool_type(moop, *(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 2; // Increment the BCP to post-call!!!  See below!
          break;
          
        case Bytecodes::_ldc_w : // fall through
        case Bytecodes::_ldc2_w: 
          return_type = constant_pool_type(moop, Bytes::get_Java_u2(bcp+1));
          if( !ds->should_reexecute()) bci_bump = 3; // Increment the BCP to post-call!!!  See below!
          break;
          
        default:
          return_type = Bytecodes::result_type(c);
          if( !ds->should_reexecute()) bci_bump = Bytecodes::length_at(bcp); // Increment the BCP to post-call!!!  See below!
          break;
        }
        if (ds->should_reexecute()) return_type = T_VOID;
      }
      // Save (possibly advanced) bci
      buf->_bci = bci+bci_bump;
      buf[-1]._retadr = Interpreter::unpack_and_go(); // Interpreter::return_entry(vtos, bci_bump);
    }

    // ---
    // Now all the Java locals.
    // First set the start of locals for the interpreter frame we are building.
    buf->_loc = (intptr_t)jexstk;

    uint loc_len = moop->max_locals();
    for (uint i = 0; i < loc_len; i++) {
      *jexstk++ = dm->get_value(ds->get_local(i),fr);
    }

    // Now that the locals have been unpacked if we have any deferred local writes
    // added by jvmti then we can free up that structure as the data is now in the
    // buffer
    GrowableArray<jvmtiDeferredLocalVariableSet*>* list = thread->deferred_locals();
    if( list ) {
      // Because of inlining we could have multiple vframes for a single frame
      // and several of the vframes could have deferred writes. Find them all.
      Unimplemented();
    }

    // ---
    // Now all the Java Expressions
    uint expr_len = ds->numstk();
    for (uint i = 0; i < expr_len; i++)
      *jexstk++ = dm->get_value(ds->get_expr(i),fr);

    // If returning from a deoptimized call, we will have return values in
    // registers that need to end up on the Java execution stack.  They are
    // not recorded in the debug info, since they did not exist at the time
    // the call began.
    if( is_youngest && is_deopt ) { 
      if( type2size[return_type] > 0 ) {
        if( type2size[return_type]==2 ) {
          *jexstk++ = (intptr_t)frame::double_slot_primitive_type_empty_slot_id << 32;
        }
        *jexstk++ = pd_fetch_return_values( thread, return_type );
        // Need to adjust the final jexstk_top for the youngest frame
        // returning values.  These returned values are not accounted for in
        // the standard debug info.
        thread->_jexstk_top = jexstk;
      }
    }

    // JVMTI PopFrame support
    // Add the number of words of popframe preserved args to expr_len
    int popframe_preserved_args_size_in_bytes = in_bytes(thread->popframe_preserved_args_size());
    int popframe_preserved_args_size_in_words = in_words(thread->popframe_preserved_args_size_in_words());
    if (handle_popframe) {
      Unimplemented();
      expr_len += popframe_preserved_args_size_in_words;
      // An interpreted frame was popped but it returns to a deoptimized
      // frame. The incoming arguments to the interpreted activation
      // were preserved in thread-local storage by the
      // remove_activation_preserving_args_entry in the interpreter; now
      // we put them back into the just-unpacked interpreter frame.
      // Note that this assumes that the locals arena grows toward lower
      // addresses.
    }

    // Set the JEX stk top
    buf->_stk = (intptr_t)jexstk;

    // --- 
    // Now move locked objects to the interpreters lock-stack.
    // No need to inflate anything, as we're moving standard oops.
    int numlcks = ds->numlocks();
    if( ds->is_extra_lock() ) { // coarsened a lock
      Untested("");
      // The last lock is "coarsened" - kept locked when it should have been
      // unlocked and relocked.  With no deopt, keeping it locked saves the 2
      // sets of back-to-back CAS's and fences.  However, here we need to
      // unlock it to match the proper Java state.
      ObjectSynchronizer::unlock(ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(numlcks-1),fr)).as_oop());
      numlcks--;
    }
    for (int i = 0; i < numlcks; i++) {
      *lckstk++ = ALWAYS_POISON_OBJECTREF((objectRef)dm->get_value(ds->get_lock(i),fr));
    }

  } else {                    // Make a C1 frame
    
    Unimplemented();
    
  }
}
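The constant-pool flags update above is an instance of the CAS-once idiom: racing threads all compute the same value, so installing it only if the slot is still zero is sufficient and the CAS result can be ignored. A minimal sketch with C++11 atomics (assumed equivalent in spirit to the Atomic::cmpxchg call above):
#include <atomic>
#include <cstdint>

void publish_once(std::atomic<intptr_t>* slot, intptr_t value) {
  intptr_t expected = 0;
  // No-op if another thread already installed the (identical) value.
  slot->compare_exchange_strong(expected, value);
}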
Example #24
bool CompiledIC::is_icholder_call_site(virtual_call_Relocation* call_site) {
  // This call site might have become stale so inspect it carefully.
  NativeCall* call = nativeCall_at(call_site->addr());
  return is_icholder_entry(call->destination());
}