void CppInterpreterGenerator::generate_adjust_callers_stack()
{
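  // A method's extra local variables must lie contiguously below its
  // parameters, which live at the top of the caller's expression
  // stack, so if the method declares more locals than parameters we
  // grow the caller's frame and zero the extra slots here, before
  // building our own frame.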
  StackFrame frame;

  const int frame_header_size = frame.unaligned_size() + slop_factor;

  const Address param_words_addr(
    Rmethod, methodOopDesc::size_of_parameters_offset());
  const Address local_words_addr(
    Rmethod, methodOopDesc::size_of_locals_offset());

  const Register param_words = r3;
  const Register local_words = r4;

  Label loop, done;

  // Check whether extra locals are actually required
  __ lhz (param_words, param_words_addr);
  __ lhz (local_words, local_words_addr);
  __ compare (param_words, local_words);
  __ beq (done);

  // Extend the frame if necessary
  const Register required_bytes  = r5;
  const Register available_bytes = r6;

  __ shift_left (required_bytes, local_words, LogBytesPerWord);
  __ sub (available_bytes, Rlocals, r1);
  __ subi (available_bytes, available_bytes, frame_header_size);
  __ maybe_extend_frame (required_bytes, available_bytes);
  
  // Zero the extra locals
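  // Roughly (a sketch, not the generated code):
  //   intptr_t *dst = (intptr_t *) (Rlocals - param_words * wordSize);
  //   for (int n = local_words - param_words; n > 0; n--)
  //     *dst-- = 0;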
  const Register dst = r7;

  __ shift_left (dst, param_words, LogBytesPerWord);
  __ sub (dst, Rlocals, dst);
  __ sub (r0, local_words, param_words);
  __ mtctr (r0);
  __ load (r0, 0);
  __ bind (loop);
  __ store (r0, Address(dst, 0));
  __ subi (dst, dst, wordSize);
  __ bdnz (loop);
  
  __ bind (done);
}
void CppInterpreterGenerator::generate_compute_interpreter_state(bool native)
{
  StackFrame frame;

  const Address stack_words_addr(
    Rmethod, methodOopDesc::max_stack_offset());
  const Address access_flags_addr(
    Rmethod, methodOopDesc::access_flags_offset());

  Label not_synchronized_1, not_synchronized_2, not_synchronized_3;
  Label not_static, init_monitor;

  const int monitor_size = frame::interpreter_frame_monitor_size() * wordSize;

  // Calculate the access flags conditions
  const Register access_flags = r3;

  __ lwz (access_flags, access_flags_addr);
  __ andi_ (r0, access_flags, JVM_ACC_SYNCHRONIZED);
  __ compare (CRsync, r0, JVM_ACC_SYNCHRONIZED);
  __ andi_ (r0, access_flags, JVM_ACC_STATIC);
  __ compare (CRstatic, r0, JVM_ACC_STATIC);
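  // CRsync and CRstatic are left holding "equal" iff the method is
  // synchronized and static respectively; the code below tests them
  // with bne to skip the synchronized- and static-only paths without
  // reloading the access flags.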

  const int basic_frame_size =
    frame.unaligned_size() + sizeof(BytecodeInterpreter) + slop_factor;

  // Calculate the frame size
  const Register stack_size = r3;
  const Register frame_size = r4;
  const Register padding    = r5;

  if (native) {
    __ load (frame_size, basic_frame_size);
  }
  else {
    __ lhz (stack_size, stack_words_addr);
    __ shift_left (stack_size, stack_size, LogBytesPerWord);
    __ addi (frame_size, stack_size, basic_frame_size);
  }
  __ bne (CRsync, not_synchronized_1);
  __ addi (frame_size, frame_size, monitor_size);
  __ bind (not_synchronized_1);
  __ calc_padding_for_alignment (padding, frame_size, StackAlignmentInBytes);
  __ add (frame_size, frame_size, padding);
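  // In effect:
  //   frame_size = align(basic_frame_size
  //                      + (native ? 0 : stack_size)
  //                      + (synchronized ? monitor_size : 0),
  //                      StackAlignmentInBytes);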

  // Save the link register and create the new frame
  __ mflr (r0);
  __ store (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
  __ neg (r0, frame_size);
  __ store_update_indexed (r1, r1, r0);
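  // (A sketch of the net effect, assuming store_update_indexed emits a
  // stwux/stdux-style store: *(r1 - frame_size) = r1; r1 -= frame_size;
  // i.e. the back chain is written and the frame pushed in one step.)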

  // Calculate everything's addresses
  const Register stack_limit  = r6;
  const Register stack        = r7;
  const Register stack_base   = Rmonitor;
  const Register monitor_base = r8;

  __ addi (stack_limit, r1, frame.start_of_locals() + slop_factor - wordSize);
  __ add (stack_limit, stack_limit, padding);
  if (native)
    __ mr (stack, stack_limit);
  else
    __ add (stack, stack_limit, stack_size);
  __ addi (stack_base, stack, wordSize);
  __ mr (monitor_base, stack_base);
  __ bne (CRsync, not_synchronized_2);
  __ addi (monitor_base, monitor_base, monitor_size);
  __ bind (not_synchronized_2);
  __ mr (r0, Rstate);
  __ mr (Rstate, monitor_base);
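  // r0 now holds the previous interpreter state (NULL when we entered
  // from outside the frame manager) and Rstate addresses the new
  // BytecodeInterpreter object; both are saved into the state below.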

  // Initialise the interpreter state object
  __ store (Rlocals, STATE(_locals));
  __ store (Rmethod, STATE(_method));
  __ store (Rstate, STATE(_self_link));
  __ store (r0, STATE(_prev_link));
  __ store (stack_limit, STATE(_stack_limit));
  __ store (stack, STATE(_stack));
  __ store (stack_base, STATE(_stack_base));
  __ store (monitor_base, STATE(_monitor_base));
  __ store (Rthread, STATE(_thread));
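  // (STATE(member) is, in this port, an Address of the named
  // BytecodeInterpreter field relative to Rstate.)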

#ifdef ASSERT
  {
    Label ok;
    __ load (r3, ThreadLocalStorage::thread_index());
    __ call (CAST_FROM_FN_PTR(address, pthread_getspecific));
    __ compare (Rthread, r3);
    __ beq (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  if (!native) {
    __ load (r3, Address(Rmethod, methodOopDesc::const_offset()));
    __ addi (r3, r3, in_bytes(constMethodOopDesc::codes_offset()));
    __ store (r3, STATE(_bcp));
  }

  __ load (r3, Address(Rmethod, methodOopDesc::constants_offset()));
  __ load (r3, Address(r3, constantPoolOopDesc::cache_offset_in_bytes()));
  __ store (r3, STATE(_constants));

  __ load (r3, BytecodeInterpreter::method_entry);
  __ stw (r3, STATE(_msg)); 

  __ load (r3, 0);
  if (native)
    __ store (r3, STATE(_bcp));    
  __ store (r3, STATE(_oop_temp));
  __ store (r3, STATE(_mdx));
  __ store (r3, STATE(_result._to_call._callee));

  // Initialise the monitor if synchronized
  __ bne (CRsync, not_synchronized_3);
  __ bne (CRstatic, not_static);
  __ get_mirror_handle (r3);  
  __ b (init_monitor);
  __ bind (not_static);
  __ load (r3, Address(Rlocals, 0));
  __ bind (init_monitor);
  __ store (r3, Address(Rmonitor, BasicObjectLock::obj_offset_in_bytes()));
  __ bind (not_synchronized_3);
}
address InterpreterGenerator::generate_normal_entry(bool synchronized)
{
  assert_different_registers(Rmethod, Rlocals, Rthread, Rstate, Rmonitor);
  
  Label re_dispatch;
  Label call_interpreter;
  Label call_method;
  Label call_non_interpreted_method;
  Label return_with_exception;
  Label return_from_method;
  Label resume_interpreter;
  Label return_to_initial_caller;
  Label more_monitors;
  Label throwing_exception;

  // We use the same code for synchronized and non-synchronized methods
  if (normal_entry)
    return normal_entry;

  address start = __ pc();

  // There are two ways in which we can arrive at this entry.
  // There is the special case where a normal interpreted method
  // calls another normal interpreted method, and there is the
  // general case of when we enter from somewhere else: from
  // call_stub, from C1 or C2, or from a fast accessor which
  // deferred. In the special case we're already in frame manager
  // code: we arrive at re_dispatch with Rstate containing the
  // previous interpreter state.  In the general case we arrive
  // at start with no previous interpreter state so we set Rstate
  // to NULL to indicate this.
  __ bind (fast_accessor_slow_entry_path);
  __ load (Rstate, 0);
  __ bind (re_dispatch);

  // Adjust the caller's stack frame to accommodate any additional
  // local variables we have contiguously with our parameters.
  generate_adjust_callers_stack();

  // Allocate and initialize our stack frame.
  generate_compute_interpreter_state(false);

  // Call the interpreter ==============================================
  __ bind (call_interpreter);

  // We can set up the frame anchor with everything we want at
  // this point, as we are thread_in_Java and no safepoints can
  // occur until we go to vm mode.  We do have to clear flags on
  // return from vm, but that is all.
  __ set_last_Java_frame ();

  // Call interpreter
  address interpreter = JvmtiExport::can_post_interpreter_events() ?
    CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks) :
    CAST_FROM_FN_PTR(address, BytecodeInterpreter::run);    

  __ mr (r3, Rstate);
  __ call (interpreter);
  __ fixup_after_potential_safepoint ();

  // Clear the frame anchor
  __ reset_last_Java_frame ();

  // Examine the message from the interpreter to decide what to do
  __ lwz (r4, STATE(_msg));
  __ compare (r4, BytecodeInterpreter::call_method);
  __ beq (call_method);
  __ compare (r4, BytecodeInterpreter::return_from_method);
  __ beq (return_from_method);
  __ compare (r4, BytecodeInterpreter::more_monitors);
  __ beq (more_monitors);
  __ compare (r4, BytecodeInterpreter::throwing_exception);
  __ beq (throwing_exception);

  __ load (r3, (intptr_t) "error: bad message from interpreter: %d\n");
  __ call (CAST_FROM_FN_PTR(address, printf));
  __ should_not_reach_here (__FILE__, __LINE__);

  // Handle a call_method message ======================================
  __ bind (call_method);

  __ load (Rmethod, STATE(_result._to_call._callee));
  __ verify_oop(Rmethod);
  __ load (Rlocals, STATE(_stack));
  __ lhz (r0, Address(Rmethod, methodOopDesc::size_of_parameters_offset()));
  __ shift_left (r0, r0, LogBytesPerWord);
  __ add (Rlocals, Rlocals, r0);
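  // Rlocals now points at the callee's first parameter, param_size
  // words above the current expression stack pointer.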

  __ load (r0, STATE(_result._to_call._callee_entry_point));
  __ load (r3, (intptr_t) start);
  __ compare (r0, r3);
  __ bne (call_non_interpreted_method);

  // Interpreted methods are intercepted and re-dispatched -----------
  __ load (r0, CAST_FROM_FN_PTR(intptr_t, RecursiveInterpreterActivation));
  __ mtlr (r0);
  __ b (re_dispatch);

  // Non-interpreted methods are dispatched normally -----------------
  __ bind (call_non_interpreted_method);
  __ mtctr (r0);
  __ bctrl ();

  // Restore Rstate
  __ load (Rstate, Address(r1, StackFrame::back_chain_offset * wordSize));
  __ subi (Rstate, Rstate, sizeof(BytecodeInterpreter));
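  // The back chain at r1 points to the frame above ours; the
  // interpreter state object was allocated at the very top of our
  // frame, immediately below that address, so subtracting
  // sizeof(BytecodeInterpreter) recovers Rstate.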

  // Check for pending exceptions
  __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
  __ compare (r0, 0);
  __ bne (return_with_exception);

  // Convert the result and resume
  generate_convert_result(CppInterpreter::_tosca_to_stack);
  __ b (resume_interpreter);

  // Handle a return_from_method message ===============================
  __ bind (return_from_method);

  __ load (r0, STATE(_prev_link));
  __ compare (r0, 0);
  __ beq (return_to_initial_caller);

  // "Return" from a re-dispatch -------------------------------------

  generate_convert_result(CppInterpreter::_stack_to_stack);
  generate_unwind_interpreter_state();

  // Resume the interpreter
  __ bind (resume_interpreter);

  __ store (Rlocals, STATE(_stack));
  __ load (Rlocals, STATE(_locals));
  __ load (Rmethod, STATE(_method));
  __ verify_oop(Rmethod);
  __ load (r0, BytecodeInterpreter::method_resume);
  __ stw (r0, STATE(_msg));
  __ b (call_interpreter);

  // Return to the initial caller (call_stub etc) --------------------
  __ bind (return_to_initial_caller);

  generate_convert_result(CppInterpreter::_stack_to_native_abi);
  generate_unwind_interpreter_state();
  __ blr ();

  // Handle a more_monitors message ====================================
  __ bind (more_monitors);

  generate_more_monitors();

  __ load (r0, BytecodeInterpreter::got_monitors);
  __ stw (r0, STATE(_msg));
  __ b (call_interpreter);

  // Handle a throwing_exception message ===============================
  __ bind (throwing_exception);

  // Check we actually have an exception
#ifdef ASSERT
  {
    Label ok;
    __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
    __ compare (r0, 0);
    __ bne (ok);
    __ should_not_reach_here (__FILE__, __LINE__);
    __ bind (ok);
  }
#endif

  // Return to wherever
  generate_unwind_interpreter_state();
  __ bind (return_with_exception);
  __ compare (Rstate, 0);
  __ bne (resume_interpreter);
  __ blr ();

  normal_entry = start;
  return start;
}
// Call an accessor method (assuming it is resolved; otherwise drop into
// the vanilla (slow path) entry).
address InterpreterGenerator::generate_accessor_entry(void) {
  if (!UseFastAccessorMethods && (!FLAG_IS_ERGO(UseFastAccessorMethods))) {
    return NULL;
  }

  Label Lslow_path, Lacquire;

  const Register
         Rclass_or_obj = R3_ARG1,
         Rconst_method = R4_ARG2,
         Rcodes        = Rconst_method,
         Rcpool_cache  = R5_ARG3,
         Rscratch      = R11_scratch1,
         Rjvmti_mode   = Rscratch,
         Roffset       = R12_scratch2,
         Rflags        = R6_ARG4,
         Rbtable       = R7_ARG5;

  static address branch_table[number_of_states];

  address entry = __ pc();

  // Check for safepoint:
  // Ditch this, real men don't need safepoint checks.

  // Also check for JVMTI mode
  // Check for null obj, take slow path if so.
  __ ld(Rclass_or_obj, Interpreter::stackElementSize, CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
  __ lwz(Rjvmti_mode, thread_(interp_only_mode));
  __ cmpdi(CCR1, Rclass_or_obj, 0);
  __ cmpwi(CCR0, Rjvmti_mode, 0);
  __ crorc(/*CCR0 eq*/2, /*CCR1 eq*/4+2, /*CCR0 eq*/2);
  __ beq(CCR0, Lslow_path); // this==null or jvmti_mode!=0

  // Do 2 things in parallel:
  // 1. Load the index out of the first instruction word, which looks like this:
  //    <0x2a><0xb4><index (2 bytes, native endianness)>.
  // 2. Load constant pool cache base.
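  //    (0x2a is aload_0 and 0xb4 is getfield: a fast accessor's body is
  //    "aload_0; getfield <index>; <x>return", so the index is the
  //    halfword at codes_offset + 2.)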
  __ ld(Rconst_method, in_bytes(Method::const_offset()), R19_method);
  __ ld(Rcpool_cache, in_bytes(ConstMethod::constants_offset()), Rconst_method);

  __ lhz(Rcodes, in_bytes(ConstMethod::codes_offset()) + 2, Rconst_method); // Lower half of 32 bit field.
  __ ld(Rcpool_cache, ConstantPool::cache_offset_in_bytes(), Rcpool_cache);

  // Get the const pool entry by means of <index>.
  const int codes_shift = exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord);
  __ slwi(Rscratch, Rcodes, codes_shift); // (codes&0xFFFF)<<codes_shift
  __ add(Rcpool_cache, Rscratch, Rcpool_cache);
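  // Rcpool_cache now points at the cache with the scaled entry index
  // added in; the entry's fields are loaded below with base_offset
  // folded into the displacement.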

  // Check if cpool cache entry is resolved.
  // We are resolved if the indices offset contains the current bytecode.
  ByteSize cp_base_offset = ConstantPoolCache::base_offset();
  // Big Endian:
  __ lbz(Rscratch, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::indices_offset()) + 7 - 2, Rcpool_cache);
  __ cmpwi(CCR0, Rscratch, Bytecodes::_getfield);
  __ bne(CCR0, Lslow_path);
  __ isync(); // Order succeeding loads wrt. load of _indices field from cpool_cache.

  // Finally, start loading the value: Get cp cache entry into regs.
  __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcpool_cache);
  __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcpool_cache);

  // Following code is from templateTable::getfield_or_static
  // Load pointer to branch table
  __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);

  // Get volatile flag
  __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // extract volatile bit
  // note: sync is needed before volatile load on PPC64

  // Check field type
  __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
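  // Rflags now holds the field's tos_state, used below to index branch_table.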

#ifdef ASSERT
  Label LFlagInvalid;
  __ cmpldi(CCR0, Rflags, number_of_states);
  __ bge(CCR0, LFlagInvalid);

  __ ld(R9_ARG7, 0, R1_SP);
  __ ld(R10_ARG8, 0, R21_sender_SP);
  __ cmpd(CCR0, R9_ARG7, R10_ARG8);
  __ asm_assert_eq("backlink", 0x543);
#endif // ASSERT
  __ mr(R1_SP, R21_sender_SP); // Cut the stack back to where the caller started.

  // Load from branch table and dispatch (volatile case: one instruction ahead)
  __ sldi(Rflags, Rflags, LogBytesPerWord);
  __ cmpwi(CCR6, Rscratch, 1); // volatile?
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // volatile ? size of 1 instruction : 0
  }
  __ ldx(Rbtable, Rbtable, Rflags);

  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    __ subf(Rbtable, Rscratch, Rbtable); // point to volatile/non-volatile entry point
  }
  __ mtctr(Rbtable);
  __ bctr();
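  // Each entry generated below is laid out as: fence (the volatile
  // entry point), then the non-volatile entry point one instruction
  // later.  On IRIW-sensitive CPUs the subf above backs Rbtable up by
  // one instruction for volatile fields so the load is preceded by a
  // fence; otherwise volatility is handled by the CCR6 test and the
  // acquire sequence at Lacquire.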

#ifdef ASSERT
  __ bind(LFlagInvalid);
  __ stop("got invalid flag", 0x6541);

  bool all_uninitialized = true,
       all_initialized   = true;
  for (int i = 0; i<number_of_states; ++i) {
    all_uninitialized = all_uninitialized && (branch_table[i] == NULL);
    all_initialized   = all_initialized   && (branch_table[i] != NULL);
  }
  assert(all_uninitialized != all_initialized, "consistency"); // either or

  __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
  if (branch_table[vtos] == 0) branch_table[vtos] = __ pc(); // non-volatile_entry point
  if (branch_table[dtos] == 0) branch_table[dtos] = __ pc(); // non-volatile_entry point
  if (branch_table[ftos] == 0) branch_table[ftos] = __ pc(); // non-volatile_entry point
  __ stop("unexpected type", 0x6551);
#endif

  if (branch_table[itos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[itos] = __ pc(); // non-volatile_entry point
    __ lwax(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[ltos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[ltos] = __ pc(); // non-volatile_entry point
    __ ldx(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[btos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[btos] = __ pc(); // non-volatile_entry point
    __ lbzx(R3_RET, Rclass_or_obj, Roffset);
    __ extsb(R3_RET, R3_RET);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[ctos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[ctos] = __ pc(); // non-volatile_entry point
    __ lhzx(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[stos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[stos] = __ pc(); // non-volatile_entry point
    __ lhax(R3_RET, Rclass_or_obj, Roffset);
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  if (branch_table[atos] == 0) { // generate only once
    __ align(32, 28, 28); // align load
    __ fence(); // volatile entry point (one instruction before non-volatile_entry point)
    branch_table[atos] = __ pc(); // non-volatile_entry point
    __ load_heap_oop(R3_RET, (RegisterOrConstant)Roffset, Rclass_or_obj);
    __ verify_oop(R3_RET);
    //__ dcbt(R3_RET); // prefetch
    __ beq(CCR6, Lacquire);
    __ blr();
  }

  __ align(32, 12);
  __ bind(Lacquire);
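  // twi_0 + isync is the PowerPC load-acquire idiom: the never-taken
  // trap establishes a dependency on the loaded value, and isync
  // prevents later accesses from being satisfied ahead of it.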
  __ twi_0(R3_RET);
  __ isync(); // acquire
  __ blr();

#ifdef ASSERT
  for (int i = 0; i<number_of_states; ++i) {
    assert(branch_table[i], "accessor_entry initialization");
    //tty->print_cr("accessor_entry: branch_table[%d] = 0x%llx (opcode 0x%llx)", i, branch_table[i], *((unsigned int*)branch_table[i]));
  }
#endif

  __ bind(Lslow_path);
  __ branch_to_entry(Interpreter::entry_for_kind(Interpreter::zerolocals), Rscratch);
  __ flush();

  return entry;
}