// Abstract method entry
// Attempt to execute an abstract method; throws AbstractMethodError.
//
address InterpreterGenerator::generate_abstract_entry(void) {
  address entry = __ pc();
  // abstract method entry
  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();
  return entry;
}
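// Aside: CAST_FROM_FN_PTR converts a C++ function pointer into the raw code
// address that call_VM embeds in the generated stub. A minimal self-contained
// sketch of the idea, assuming a HotSpot-like `address` typedef (illustrative,
// not the real macro definition):

#include <cstdint>

typedef unsigned char* address;
#define CAST_FROM_FN_PTR(new_type, func_ptr) ((new_type)((uintptr_t)(func_ptr)))

static void example_runtime_entry() { /* runtime entry body would go here */ }

static address example_entry_address() {
  // The resulting plain address is what gets baked into the emitted call.
  return CAST_FROM_FN_PTR(address, example_runtime_entry);
}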
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();
  // rbx: method
  // rcx: temporary
  // rdi: pointer to locals
  // rsp: end of copied parameters area
  __ mov(rcx, rsp);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
  __ ret(0);
  return entry;
}
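// For context: a slow signature handler walks the method's signature and
// copies each Java local into the slot layout the native ABI expects. A
// minimal sketch of that idea, assuming forward-indexed locals and a flat
// outgoing argument area (illustrative only, not HotSpot's implementation):

#include <cstdint>

static int marshal_arguments(const char* sig, const intptr_t* locals,
                             intptr_t* c_args) {
  int n = 0;
  for (const char* p = sig + 1; *p != ')'; ++p) {   // skip the leading '('
    switch (*p) {
      case 'J': case 'D':                           // long/double: two slots
        c_args[n]     = locals[n];
        c_args[n + 1] = locals[n + 1];
        n += 2;
        break;
      case '[':                                     // array: skip dimensions
        while (*p == '[') ++p;
        if (*p != 'L') { c_args[n] = locals[n]; ++n; break; }
        /* fall through */
      case 'L':                                     // reference: skip class name
        while (*p != ';') ++p;
        c_args[n] = locals[n];
        ++n;
        break;
      default:                                      // one-slot primitives
        c_args[n] = locals[n];
        ++n;
        break;
    }
  }
  return n;  // number of argument slots copied
}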
Example #3
  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Only callee-saved registers are preserved
  // (through the normal register window / RegisterMap handling).
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller-saved registers were assumed volatile in the compiler.
  //
  address generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {
    int insts_size = VerifyThread ? 1 * K : 512;
    int locs_size  = 32;

    CodeBuffer code(name, insts_size, locs_size);
    MacroAssembler* _masm = new MacroAssembler(&code);

    const Register saved_exception_pc_addr = GR31_SCRATCH;
    const Register continuation            = GR30_SCRATCH;

    const Register saved_exception_pc      = GR31_SCRATCH;

    const BranchRegister continuation_br   = BR6_SCRATCH;

    // 4826555: nsk test stack016 fails.  See os_linux_ia64.cpp.
    // Reload register stack limit because the Linux kernel
    // doesn't reload GR4-7 from the ucontext.
    __ add(GR7_reg_stack_limit, thread_(register_stack_limit));
    __ ld8(GR7_reg_stack_limit, GR7_reg_stack_limit);

//  __ verify_thread();

    // If the exception occurred at a call site target, RP contains the return address
    // (which is also the exception PC), and the call instruction has pushed a degenerate
    // register frame.  If the exception happened elsewhere, the signal handler has saved
    // the exception address in the thread state and we must execute a call instruction
    // in order to obtain a degenerate frame.  In either case, we must then push a frame
    // so we can execute a call_VM.

    if (restore_saved_exception_pc) {
      __ add(saved_exception_pc_addr, thread_(saved_exception_pc));
      __ ld8(saved_exception_pc, saved_exception_pc_addr);
      __ push_dummy_full_frame(saved_exception_pc);
    } else {
      __ push_full_frame();
    }

    // Install the exception oop in the thread state, etc.
    __ call_VM(noreg, runtime_entry);

    // Scale back to a thin frame for forward_exception.
    __ pop_full_to_thin_frame();

    // Branch to forward_exception.
    __ mova(continuation, StubRoutines::forward_exception_entry());
    __ mov(continuation_br, continuation);
    __ br(continuation_br);

    __ flush_bundle();

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, &code, CodeBlob::frame_never_safe, 0, NULL, false);
    return stub->entry_point();
  }
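// Hedged sketch of the machinery around stubs like the one above (names
// hypothetical, not HotSpot's): the platform signal handler maps the faulting
// PC to the matching continuation stub and resumes execution there, which is
// what fabricates the exception oop and starts normal dispatch.

#include <cstdint>
#include <map>

typedef uintptr_t addr_t;

// fault PC -> continuation stub entry, filled in as stubs are generated
static std::map<addr_t, addr_t> implicit_exception_table;

// Returns the stub to continue at, or 0 if this fault is not an expected
// implicit check (in which case the fault is a real crash).
static addr_t continuation_for_fault(addr_t faulting_pc) {
  std::map<addr_t, addr_t>::const_iterator it =
      implicit_exception_table.find(faulting_pc);
  return it == implicit_exception_table.end() ? 0 : it->second;
}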
void InterpreterGenerator::generate_counter_overflow(Label& Lcontinue) {

  // Generate code to initiate compilation on the counter overflow.

  // InterpreterRuntime::frequency_counter_overflow takes two arguments,
  // the first indicates if the counter overflow occurs at a backwards branch (NULL bcp)
  // and the second is only used when the first is true.  We pass zero for both.
  // The call returns the address of the verified entry point for the method or NULL
  // if the compilation did not complete (either went background or bailed out).
  __ set((int)false, O2);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), O2, O2, true);
  // returns verified_entry_point or NULL
  // we ignore it in any case
  __ ba_short(Lcontinue);
}
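// For context, a hedged sketch of the counter logic that reaches this point
// (threshold and field names illustrative): the interpreter bumps a counter
// per invocation or backedge and calls frequency_counter_overflow once it
// crosses the compile threshold; the returned entry point is ignored here
// and interpretation simply continues.

struct InvocationCounter {
  int count;
};

static const int kCompileThreshold = 10000;  // assumption: a typical default

static bool overflows_on_increment(InvocationCounter* c) {
  return ++c->count >= kCompileThreshold;
}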
// Abstract method entry
// Attempt to execute an abstract method; throws AbstractMethodError.
address InterpreterGenerator::generate_abstract_entry(void) {
  // rmethod: Method*
  // r13: sender SP

  address entry_point = __ pc();

  // abstract method entry

  //  pop return address, reset last_sp to NULL
  __ empty_expression_stack();
  __ restore_bcp();      // bcp must be correct for exception handler   (was destroyed)
  __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)

  // throw exception
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                             InterpreterRuntime::throw_AbstractMethodError));
  // the call_VM checks for exception, so we should never return here.
  __ should_not_reach_here();

  return entry_point;
}
// These stubs are used by the compiler only.
// Argument registers, which must be preserved:
//   rcx - receiver (always first argument)
//   rdx - second argument (if any)
// Other registers that might be usable:
//   rax - inline cache register (is interface for itable stub)
//   rbx - method (used when calling out to interpreter)
// Available now, but may become callee-save at some point:
//   rsi, rdi
// Note that rax and rdx are also used for return values.
//
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int i486_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), i486_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT

  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif /* PRODUCT */

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");

  // get receiver klass
  address npe_addr = __ pc();
  __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  const Register method = rbx;

  // load Method* and target address
  __ lookup_virtual_method(rax, vtable_index, method);

  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }

  // rax: receiver klass
  // method (rbx): Method*
  // rcx: receiver
  address ame_addr = __ pc();
  __ jmp( Address(method, Method::from_compiled_offset()));

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
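// Hedged C++ sketch of the dispatch sequence the stub above emits (layouts
// illustrative, not HotSpot's real object model): receiver -> klass ->
// vtable[index] -> Method* -> compiled entry point. The two recorded PCs
// correspond to the loads that may take an implicit fault.

struct Method { void* from_compiled_entry; };
struct Klass  { Method* vtable[1]; };   // the vtable really follows the Klass
struct Oop    { Klass* klass; };        // pretend klass_offset_in_bytes() == 0

static void* vtable_dispatch_target(Oop* receiver, int vtable_index) {
  Klass*  k = receiver->klass;            // npe_addr: faults on NULL receiver
  Method* m = k->vtable[vtable_index];    // lookup_virtual_method
  return m->from_compiled_entry;          // ame_addr: faults on NULL entry
}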
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andr(esp, esp, -16);
  __ mov(c_rarg3, esp);
  // rmethod
  // rlocals
  // c_rarg3: first stack arg - wordSize

  // adjust sp
  __ sub(sp, c_rarg3, 18 * wordSize);
  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rmethod, rlocals, c_rarg3);

  // r0: result handler

  // Stack layout:
  // rsp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Restore LR
  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));

  // Do FP first so we can use c_rarg3 as temp
  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = as_FloatRegister(i);

    Label d, done;

    __ tbnz(c_rarg3, i, d);
    __ ldrs(r, Address(sp, (10 + i) * wordSize));
    __ b(done);
    __ bind(d);
    __ ldrd(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here.  It will be loaded with the JNIEnv* later.
  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
    Register rm = as_Register(i), rn = as_Register(i+1);
    __ ldp(rm, rn, Address(sp, i * wordSize));
  }

  __ add(sp, sp, 18 * wordSize);
  __ ret(lr);

  return entry;
}
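// Hedged sketch of the identifier word consumed by the tbnz loop above: one
// bit per FP argument register, set when the argument is a double rather
// than a float (layout illustrative):

#include <cstdint>

union FpSlot { float f; double d; };

static double read_fp_arg(uint32_t fp_tags, int i, const FpSlot* slots) {
  // bit i set => slot i holds a double; clear => a float (widened for show)
  return (fp_tags & (1u << i)) ? slots[i].d : (double)slots[i].f;
}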
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // r14: pointer to locals
  // c_rarg3: first stack arg - wordSize
  __ mov(c_rarg3, rsp);
  // adjust rsp
  __ subptr(rsp, 4 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);

  // rax: result handler

  // Stack layout:
  // rsp: 3 integer or float args (if static first is unused)
  //      1 float/double identifiers
  //        return address
  //        stack args
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Do FP first so we can use c_rarg3 as temp
  __ movl(c_rarg3, Address(rsp, 3 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_int_register_parameters_c - 1; i++) {
    XMMRegister floatreg = as_XMMRegister(i+1);
    Label isfloatordouble, isdouble, next;

    __ testl(c_rarg3, 1 << (i*2));      // Float or Double?
    __ jcc(Assembler::notZero, isfloatordouble);

    // Do Int register here
    switch (i) {
      case 0:
        __ movl(rscratch1, Address(rbx, Method::access_flags_offset()));
        __ testl(rscratch1, JVM_ACC_STATIC);
        __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
        break;
      case 1:
        __ movptr(c_rarg2, Address(rsp, wordSize));
        break;
      case 2:
        __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
        break;
      default:
        break;
    }

    __ jmp(next);

    __ bind(isfloatordouble);
    __ testl(c_rarg3, 1 << ((i*2)+1));     // Double?
    __ jcc(Assembler::notZero, isdouble);

    // Do Float here
    __ movflt(floatreg, Address(rsp, i * wordSize));
    __ jmp(next);

    // Do Double here
    __ bind(isdouble);
    __ movdbl(floatreg, Address(rsp, i * wordSize));

    __ bind(next);
  }

  // restore rsp
  __ addptr(rsp, 4 * wordSize);

  __ ret(0);

  return entry;
}
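// Hedged sketch of this variant's identifier word: two bits per argument
// position, matching the two testl checks in the loop above (bit 2*i: the
// argument is floating point at all; bit 2*i+1: it is a double):

enum ArgKind { ARG_INT, ARG_FLOAT, ARG_DOUBLE };

static ArgKind classify_arg(unsigned ids, int i) {
  if ((ids & (1u << (i * 2))) == 0) return ARG_INT;
  return (ids & (1u << (i * 2 + 1))) ? ARG_DOUBLE : ARG_FLOAT;
}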
address TemplateInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  // rbx: method
  // r14: pointer to locals
  // c_rarg3: first stack arg - wordSize
  __ mov(c_rarg3, rsp);
  // adjust rsp
  __ subptr(rsp, 14 * wordSize);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rbx, r14, c_rarg3);

  // rax: result handler

  // Stack layout:
  // rsp: 5 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        return address
  //        stack args
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Do FP first so we can use c_rarg3 as temp
  __ movl(c_rarg3, Address(rsp, 5 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const XMMRegister r = as_XMMRegister(i);

    Label d, done;

    __ testl(c_rarg3, 1 << i);
    __ jcc(Assembler::notZero, d);
    __ movflt(r, Address(rsp, (6 + i) * wordSize));
    __ jmp(done);
    __ bind(d);
    __ movdbl(r, Address(rsp, (6 + i) * wordSize));
    __ bind(done);
  }

  // Now handle integrals.  Only do c_rarg1 if not static.
  __ movl(c_rarg3, Address(rbx, Method::access_flags_offset()));
  __ testl(c_rarg3, JVM_ACC_STATIC);
  __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));

  __ movptr(c_rarg2, Address(rsp, wordSize));
  __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
  __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
  __ movptr(c_rarg5, Address(rsp, 4 * wordSize));

  // restore rsp
  __ addptr(rsp, 14 * wordSize);

  __ ret(0);

  return entry;
}
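// Hedged sketch of the cmovptr sequence above: the first integer argument
// register is only loaded from the stack when the method is non-static,
// because for static methods that slot is unused ("if static first is
// unused" in the layout comment; 0x0008 is the JVM_ACC_STATIC flag value):

static const int kJvmAccStatic = 0x0008;

static void maybe_load_first_int_arg(int access_flags, void** c_rarg1,
                                     void* const* stack) {
  if ((access_flags & kJvmAccStatic) == 0) {
    *c_rarg1 = stack[0];  // conditional move from Address(rsp, 0)
  }
}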
address InterpreterGenerator::generate_native_entry(bool synchronized)
{
  const Register handler  = r14;
  const Register function = r15;

  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor,
                             handler, function);

  // We use the same code for synchronized and unsynchronized methods
  if (native_entry)
    return native_entry;

  address start = __ pc();

  // Allocate and initialize our stack frame.
  __ load (Rstate, 0);
  generate_compute_interpreter_state(true);

  // Make sure method is native and not abstract
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, Address(Rmethod, methodOopDesc::access_flags_offset()));
    __ andi_ (r0, r0, JVM_ACC_NATIVE | JVM_ACC_ABSTRACT);
    __ compare (r0, JVM_ACC_NATIVE);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Lock if necessary
  Label not_synchronized_1;
  
  __ bne (CRsync, not_synchronized_1);
  __ lock_object (Rmonitor);
  __ bind (not_synchronized_1);
  
  // Get signature handler
  const Address signature_handler_addr(
    Rmethod, methodOopDesc::signature_handler_offset());

  Label return_to_caller, got_signature_handler;

  __ load (handler, signature_handler_addr);
  __ compare (handler, 0);
  __ bne (got_signature_handler);
  __ call_VM (noreg,
              CAST_FROM_FN_PTR(address,
                               InterpreterRuntime::prepare_native_call),
              Rmethod,
              CALL_VM_NO_EXCEPTION_CHECKS);
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  __ load (handler, signature_handler_addr);
  __ bind (got_signature_handler); 

  // Get the native function entry point
  const Address native_function_addr(
    Rmethod, methodOopDesc::native_function_offset());

  Label got_function;

  __ load (function, native_function_addr);
#ifdef ASSERT
  {
    // InterpreterRuntime::prepare_native_call() sets the mirror
    // handle and native function address first and the signature
    // handler last, so function should always be set here.
    Label ok;
    __ compare (function, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Call signature handler
  __ mtctr (handler);
  __ bctrl ();
  __ mr (handler, r0);

  // Pass JNIEnv
  __ la (r3, Address(Rthread, JavaThread::jni_environment_offset()));

  // Pass mirror handle if static
  const Address oop_temp_addr = STATE(_oop_temp);

  Label not_static;

  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r4);
  __ store (r4, oop_temp_addr);
  __ la (r4, oop_temp_addr);
  __ bind (not_static);

  // Set up the Java frame anchor
  __ set_last_Java_frame ();

  // Change the thread state to native
  const Address thread_state_addr(Rthread, JavaThread::thread_state_offset());
#ifdef ASSERT
  {
    Label ok;
    __ lwz (r0, thread_state_addr);
    __ compare (r0, _thread_in_Java);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif
  __ load (r0, _thread_in_native);
  __ stw (r0, thread_state_addr);

  // Make the call
  __ call (function);
  __ fixup_after_potential_safepoint ();

  // The result will be in r3 (and maybe r4 on 32-bit) or f1.
  // Wherever it is, we must store it before calling anything else.
  const Register r3_save      = r16;
#ifdef PPC32
  const Register r4_save      = r17;
#endif
  const FloatRegister f1_save = f14;

  __ mr (r3_save, r3);
#ifdef PPC32
  __ mr (r4_save, r4);
#endif
  __ fmr (f1_save, f1);

  // Switch thread to "native transition" state before reading the
  // synchronization state.  This additional state is necessary
  // because reading and testing the synchronization state is not
  // atomic with respect to garbage collection.
  __ load (r0, _thread_in_native_trans);
  __ stw (r0, thread_state_addr);

  // Ensure the new state is visible to the VM thread.
  if (os::is_MP()) {
    if (UseMembar)
      __ sync ();
    else
      __ serialize_memory (r3, r4);
  }

  // Check for safepoint operation in progress and/or pending
  // suspend requests.  We use a leaf call in order to leave
  // the last_Java_frame setup undisturbed.
  Label block, no_block;

  __ load (r3, (intptr_t) SafepointSynchronize::address_of_state());
  __ lwz (r0, Address(r3, 0));
  __ compare (r0, SafepointSynchronize::_not_synchronized);
  __ bne (block);
  __ lwz (r0, Address(Rthread, JavaThread::suspend_flags_offset()));
  __ compare (r0, 0);
  __ beq (no_block);
  __ bind (block);
  __ call_VM_leaf (
       CAST_FROM_FN_PTR(address, 
                        JavaThread::check_special_condition_for_native_trans));
  __ fixup_after_potential_safepoint ();
  __ bind (no_block);

  // Change the thread state
  __ load (r0, _thread_in_Java);
  __ stw (r0, thread_state_addr);

  // Reset the frame anchor  
  __ reset_last_Java_frame ();

  // If the result was an OOP then unbox it and store it in the frame
  // (where it will be safe from garbage collection) before we release
  // the handle it might be protected by
  Label non_oop, store_oop;
  
  __ load (r0, (intptr_t) AbstractInterpreter::result_handler(T_OBJECT));
  __ compare (r0, handler);
  __ bne (non_oop);
  __ compare (r3_save, 0);
  __ beq (store_oop);
  __ load (r3_save, Address(r3_save, 0));
  __ bind (store_oop);
  __ store (r3_save, STATE(_oop_temp));
  __ bind (non_oop);

  // Reset handle block
  __ load (r3, Address(Rthread, JavaThread::active_handles_offset()));
  __ load (r0, 0);
  __ stw (r0, Address(r3, JNIHandleBlock::top_offset_in_bytes()));

  // If there is an exception we skip the result handler and return.
  // Note that this also skips unlocking which seems totally wrong,
  // but apparently this is what the asm interpreter does so we do
  // too.
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_to_caller);
  
  // Unlock if necessary
  Label not_synchronized_2;
  
  __ bne (CRsync, not_synchronized_2);
  __ unlock_object (Rmonitor);
  __ bind (not_synchronized_2);

  // Restore saved result and call the result handler
  __ mr (r3, r3_save);
#ifdef PPC32
  __ mr (r4, r4_save);
#endif
  __ fmr (f1, f1_save);
  __ mtctr (handler);
  __ bctrl ();
  
  // Unwind the current activation and return
  __ bind (return_to_caller);

  generate_unwind_interpreter_state();
  __ blr ();

  native_entry = start;
  return start;
}
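// Hedged sketch of the thread-state protocol the native entry implements
// (state names and fence choices illustrative): Java -> native around the
// call, then native -> native_trans with a fence so the VM thread observes
// the transition before this thread reads the safepoint state.

#include <atomic>

enum ThreadState { kInJava, kInNative, kInNativeTrans };

struct ThreadSketch {
  std::atomic<int> state;
  bool safepoint_or_suspend_pending() const { return false; }  // placeholder
};

static void call_native_sketch(ThreadSketch* t, void (*fn)()) {
  t->state.store(kInNative, std::memory_order_release);
  fn();                                                 // the native method
  t->state.store(kInNativeTrans, std::memory_order_seq_cst);  // state + fence
  if (t->safepoint_or_suspend_pending()) {
    // block here (check_special_condition_for_native_trans in the code above)
  }
  t->state.store(kInJava, std::memory_order_release);
}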
Example #11
// Used by compiler only; may use only caller saved, non-argument registers
// NOTE:  %%%% if any change is made to this stub make sure that the function
//             pd_code_size_limit is changed to ensure the correct size for VtableStub
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int sparc_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(sparc_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), sparc_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ inc_counter(SharedRuntime::nof_megamorphic_calls_addr(), G5, G3_scratch);
  }
#endif /* PRODUCT */

  assert(VtableStub::receiver_location() == O0->as_VMReg(), "receiver expected in O0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(O0, G3_scratch);

  // set Method* (in case of interpreted method), and destination address
#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ld(G3_scratch, in_bytes(Klass::vtable_length_offset()), G5);
    __ cmp_and_br_short(G5, vtable_index*vtableEntry::size(), Assembler::greaterUnsigned, Assembler::pt, L);
    __ set(vtable_index, O2);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), O0, O2);
    __ bind(L);
  }
#endif

  __ lookup_virtual_method(G3_scratch, vtable_index, G5_method);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ br_notnull_short(G5_method, Assembler::pt, L);
    __ stop("Vtable entry is ZERO");
    __ bind(L);
  }
#endif

  address ame_addr = __ pc();  // if the vtable entry is null, the method is abstract
                               // NOTE: for vtable dispatches, the vtable entry will never be null.

  __ ld_ptr(G5_method, in_bytes(Method::from_compiled_offset()), G3_scratch);

  // jump to target (either compiled code or c2iadapter);
  // G5_method already holds the Method* in case we land in a c2i adapter
  __ JMP(G3_scratch, 0);
  __ delayed()->nop();   // delay slot: nothing left to do

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 2*BytesPerInstWord;  // 32-bit offset is this much larger than a 13-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for sethi;add");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int aarch64_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(aarch64_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), aarch64_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ lea(r19, ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
    __ incrementw(Address(r19));
  }
#endif

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == j_rarg0->as_VMReg(), "receiver expected in j_rarg0");

  // get receiver klass
  address npe_addr = __ pc();
  __ load_klass(r19, j_rarg0);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ ldrw(rscratch1, Address(r19, Klass::vtable_length_offset()));
    __ cmpw(rscratch1, vtable_index * vtableEntry::size());
    __ br(Assembler::GT, L);
    __ enter();
    __ mov(r2, vtable_index);
    __ call_VM(noreg,
               CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), j_rarg0, r2);
    __ leave();
    __ bind(L);
  }
#endif // PRODUCT

  __ lookup_virtual_method(r19, vtable_index, rmethod);

  if (DebugVtables) {
    Label L;
    __ cbz(rmethod, L);
    __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
    __ cbnz(rscratch1, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // r0: receiver klass
  // rmethod: Method*
  // r2: receiver
  address ame_addr = __ pc();
  __ ldr(rscratch1, Address(rmethod, Method::from_compiled_offset()));
  __ br(rscratch1);

  __ flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at " PTR_FORMAT "[%d] left over: %d",
                  vtable_index, p2i(s->entry_point()),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
// Used by compiler only; may use only caller saved, non-argument
// registers.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  // PPC port: use fixed size.
  const int code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new (code_length) VtableStub(true, vtable_index);

  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT
  if (CountCompiledCalls) {
    int offs = __ load_const_optimized(R11_scratch1, SharedRuntime::nof_megamorphic_calls_addr(), R12_scratch2, true);
    __ lwz(R12_scratch2, offs, R11_scratch1);
    __ addi(R12_scratch2, R12_scratch2, 1);
    __ stw(R12_scratch2, offs, R11_scratch1);
  }
#endif

  assert(VtableStub::receiver_location() == R3_ARG1->as_VMReg(), "receiver expected in R3_ARG1");

  // Get receiver klass.
  const Register rcvr_klass = R11_scratch1;

  // We might implicit NULL fault here.
  address npe_addr = __ pc(); // npe = null pointer exception
  __ load_klass_with_trap_null_check(rcvr_klass, R3);

  // Set method (in case of interpreted method), and destination address.
  int entry_offset = InstanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_len = R12_scratch2;
    __ lwz(vtable_len, InstanceKlass::vtable_length_offset()*wordSize, rcvr_klass);
    __ cmpwi(CCR0, vtable_len, vtable_index*vtableEntry::size());
    __ bge(CCR0, L);
    __ li(R12_scratch2, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), R3_ARG1, R12_scratch2, false);
    __ bind(L);
  }
#endif

  int v_off = entry_offset*wordSize + vtableEntry::method_offset_in_bytes();

  __ ld(R19_method, v_off, rcvr_klass);

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ cmpdi(CCR0, R19_method, 0);
    __ bne(CCR0, L);
    __ stop("Vtable entry is ZERO", 102);
    __ bind(L);
  }
#endif

  // If the vtable entry is null, the method is abstract.
  address ame_addr = __ pc(); // ame = abstract method error

  __ load_with_trap_null_check(R12_scratch2, in_bytes(Method::from_compiled_offset()), R19_method);
  __ mtctr(R12_scratch2);
  __ bctr();

  masm->flush();

  guarantee(__ pc() <= s->code_end(), "overflowed buffer");

  s->set_exception_points(npe_addr, ame_addr);

  return s;
}
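// Hedged worked example of the offset arithmetic above (parameters
// illustrative): the Method* slot lives at
//   v_off = (vtable_start + index * entry_size) * wordSize + method_offset
// bytes from the klass pointer.

#include <cstddef>

static size_t vtable_method_slot_offset(size_t vtable_start_words,
                                        int index,
                                        size_t entry_size_words,
                                        size_t word_size,
                                        size_t method_offset_in_entry) {
  size_t entry_offset = vtable_start_words + index * entry_size_words;
  return entry_offset * word_size + method_offset_in_entry;
}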
Example #14
// Used by compiler only; may use only caller saved, non-argument registers.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {

  const int   code_length = VtableStub::pd_code_size_limit(true);
  VtableStub *s = new(code_length) VtableStub(true, vtable_index);
  if (s == NULL) { // Indicates OOM in the code cache.
    return NULL;
  }

  ResourceMark    rm;
  CodeBuffer      cb(s->entry_point(), code_length);
  MacroAssembler *masm = new MacroAssembler(&cb);
  address start_pc;
  int     padding_bytes = 0;

#if (!defined(PRODUCT) && defined(COMPILER2))
  if (CountCompiledCalls) {
    // Count unused bytes
    //                  worst case             actual size
    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(Z_R1_scratch, (long)SharedRuntime::nof_megamorphic_calls_addr(), true);

    // Use generic emitter for direct memory increment.
    // Abuse Z_method as scratch register for generic emitter.
    // It is loaded further down anyway before it is first used.
    __ add2mem_32(Address(Z_R1_scratch), 1, Z_method);
  }
#endif

  assert(VtableStub::receiver_location() == Z_R2->as_VMReg(), "receiver expected in Z_ARG1");

  // Get receiver klass.
  // Must do an explicit check if implicit checks are disabled.
  address npe_addr = __ pc(); // npe == NULL ptr exception
  __ null_check(Z_ARG1, Z_R1_scratch, oopDesc::klass_offset_in_bytes());
  const Register rcvr_klass = Z_R1_scratch;
  __ load_klass(rcvr_klass, Z_ARG1);

  // Set method (in case of interpreted method), and destination address.
  int entry_offset = in_bytes(InstanceKlass::vtable_start_offset()) +
                     vtable_index * vtableEntry::size_in_bytes();

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // Check offset vs vtable length.
    const Register vtable_idx = Z_R0_scratch;

    // Count unused bytes.
    //                  worst case             actual size
    padding_bytes += __ load_const_size() - __ load_const_optimized_rtn_len(vtable_idx, vtable_index*vtableEntry::size_in_bytes(), true);

    assert(Immediate::is_uimm12(in_bytes(InstanceKlass::vtable_length_offset())), "disp too large");
    __ z_cl(vtable_idx, in_bytes(InstanceKlass::vtable_length_offset()), rcvr_klass);
    __ z_brl(L);
    __ z_lghi(Z_ARG3, vtable_index);  // Debug code, don't optimize.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), Z_ARG1, Z_ARG3, false);
    // Count unused bytes (assume worst case here).
    padding_bytes += 12;
    __ bind(L);
  }
#endif

  int v_off = entry_offset + vtableEntry::method_offset_in_bytes();

  // Duplicate safety code from enc_class Java_Dynamic_Call_dynTOC.
  if (Displacement::is_validDisp(v_off)) {
    __ z_lg(Z_method/*method oop*/, v_off, rcvr_klass/*class oop*/);
    // Account for the load_const in the else path.
    padding_bytes += __ load_const_size();
  } else {
    // Worst case: offset does not fit in displacement field.
    __ load_const(Z_method, v_off); // Z_method temporarily holds the offset value.
    __ z_lg(Z_method/*method oop*/, 0, Z_method/*method offset*/, rcvr_klass/*class oop*/);
  }

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    __ z_ltgr(Z_method, Z_method);
    __ z_brne(L);
    __ stop("Vtable entry is ZERO",102);
    __ bind(L);
  }
#endif

  address ame_addr = __ pc(); // ame = abstract method error

  // Must do an explicit check if implicit checks are disabled.
  __ null_check(Z_method, Z_R1_scratch, in_bytes(Method::from_compiled_offset()));
  __ z_lg(Z_R1_scratch, in_bytes(Method::from_compiled_offset()), Z_method);
  __ z_br(Z_R1_scratch);

  masm->flush();

  s->set_exception_points(npe_addr, ame_addr);

  return s;
}
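// Hedged sketch of the displacement check above: when the byte offset fits
// the instruction's displacement field, a single load suffices; otherwise
// the offset is materialized in a register first. Bound shown for a 12-bit
// unsigned displacement (illustrative; the real check is
// Displacement::is_validDisp):

static bool fits_uimm12(long disp) {
  return disp >= 0 && disp < (1L << 12);
}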
// Used by compiler only; may use only caller-saved registers eax, ebx, ecx.
// edx holds first int arg; esi, edi, ebp are callee-save and must be preserved.
// Leave receiver in ecx; required behavior when +OptoArgsInRegisters
// is modified to put first oop in ecx.
//
// NOTE: If this code is used by the C1, the receiver_location is always 0.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index, int receiver_location) {
  const int i486_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index, receiver_location);
  ResourceMark rm;
  MacroAssembler* masm = new MacroAssembler(new CodeBuffer(s->entry_point(), i486_code_length));

#ifndef PRODUCT
#ifdef COMPILER2
  if (CountCompiledCalls) __ incl(Address((int)OptoRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
#endif
#endif

  // get receiver (need to skip return address on top of stack)
#ifdef COMPILER1
  assert(receiver_location == 0, "receiver is always in ecx - no location info needed");
#else
  if (receiver_location < SharedInfo::stack0) {
    assert(receiver_location == ECX_num, "receiver expected in ecx");
  } else {
    __ movl(ecx, Address(esp, SharedInfo::reg2stack(OptoReg::Name(receiver_location)) * wordSize+wordSize/*skip return address*/));
  }
#endif
  // get receiver klass
  address npe_addr = __ pc();
  __ movl(eax, Address(ecx, oopDesc::klass_offset_in_bytes()));
  // compute entry offset (in words)
  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) { 
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(eax, instanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(ebx, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), ecx, ebx);
    __ bind(L);
  }
#endif // PRODUCT
  // load methodOop and target address
#ifdef COMPILER1
  __ movl(ebx, Address(eax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
  address ame_addr = __ pc();
  __ movl(edx, Address(ebx, methodOopDesc::from_compiled_code_entry_point_offset()));
  if (DebugVtables) {
    Label L;
    __ testl(edx, edx);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // eax: receiver klass
  // ebx: methodOop
  // ecx: receiver
  // edx: entry point
  __ jmp(edx);
#else
  __ movl(eax, Address(eax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
  address ame_addr = __ pc();
  __ movl(ebx, Address(eax, methodOopDesc::from_compiled_code_entry_point_offset()));  

  if (DebugVtables) {
    Label L;
    __ testl(ebx, ebx);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }

  // jump to target (either compiled code or c2iadapter)
  // eax: methodOop (in case we call c2iadapter)
  __ jmp(ebx);
#endif // COMPILER1

  masm->flush();
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
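// Hedged sketch of how the recorded exception points are used (names
// hypothetical): the fault handler compares the faulting PC against the two
// addresses to decide which exception the implicit check stands for.

enum StubFault { FAULT_NPE, FAULT_AME, FAULT_UNKNOWN };

static StubFault classify_stub_fault(const void* pc, const void* npe_addr,
                                     const void* ame_addr) {
  if (pc == npe_addr) return FAULT_NPE;   // receiver was NULL
  if (pc == ame_addr) return FAULT_AME;   // vtable entry was NULL (abstract)
  return FAULT_UNKNOWN;
}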