Example No. 1
static OopMap* generate_oop_map(StubAssembler* sasm, bool save_fpu_registers) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  sasm->set_frame_size(frame_size_in_bytes / BytesPerWord);
  int frame_size_in_slots = frame_size_in_bytes / sizeof(jint);
  OopMap* oop_map = new OopMap(frame_size_in_slots, 0);

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      oop_map->set_callee_saved(VMRegImpl::stack2reg(sp_offset),
                                r->as_VMReg());
    }
  }
  return oop_map;
}
void CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf) {
#ifdef COMPILER2
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // set (empty), G5
  // jmp -1

  address mark = cbuf.insts_mark();  // Get mark within main instrs section.

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size()*2);
  if (base == NULL) return;  // CodeBuffer::expand failed.

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark));

  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
#else
  ShouldNotReachHere();
#endif
}
Example No. 3
static OopMap* save_live_registers(StubAssembler* sasm, bool save_fpu_registers = true) {
  assert(frame_size_in_bytes == __ total_frame_size_in_bytes(reg_save_size_in_words),
         "mismatch in calculation");
  __ save_frame_c1(frame_size_in_bytes);

  // Record volatile registers as callee-save values in an OopMap so their save locations will be
  // propagated to the caller frame's RegisterMap during StackFrameStream construction (needed for
  // deoptimization; see compiledVFrame::create_stack_value).  The caller's I, L and O registers
  // are saved in register windows - I's and L's in the caller's frame and O's in the stub frame
  // (as the stub's I's) when the runtime routine called by the stub creates its frame.
  // OopMap frame sizes are in c2 stack slot sizes (sizeof(jint))

  int i;
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      int sp_offset = cpu_reg_save_offsets[i];
      __ st_ptr(r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  if (save_fpu_registers) {
    for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      int sp_offset = fpu_reg_save_offsets[i];
      __ stf(FloatRegisterImpl::S, r, SP, (sp_offset * BytesPerWord) + STACK_BIAS);
    }
  }

  return generate_oop_map(sasm, save_fpu_registers);
}
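
The comment above notes that OopMap frame sizes are counted in C2 stack slots (sizeof(jint)), while the register save offsets are kept in words. A minimal standalone sketch of that unit conversion, using assumed sizes (8-byte words, 4-byte slots) rather than HotSpot constants:

#include <cassert>
#include <cstdint>

int main() {
  const int BytesPerWord = 8;                    // assumption: 64-bit word size
  const int slot_size    = sizeof(int32_t);      // OopMap slots are jint-sized
  int frame_size_in_bytes = 34 * BytesPerWord;   // hypothetical stub frame
  int frame_size_in_words = frame_size_in_bytes / BytesPerWord;
  int frame_size_in_slots = frame_size_in_bytes / slot_size;
  assert(frame_size_in_slots == 2 * frame_size_in_words);  // two jint slots per word
  return 0;
}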
Example No. 4
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark) {
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  // set (empty), G5
  // jmp -1

  if (mark == NULL) {
    mark = cbuf.insts_mark();  // Get mark within main instrs section.
  }

  MacroAssembler _masm(&cbuf);

  address base = __ start_a_stub(to_interp_stub_size());
  if (base == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }

  // Static stub relocation stores the instruction address of the call.
  __ relocate(static_stub_Relocation::spec(mark));

  __ set_metadata(NULL, as_Register(Matcher::inline_cache_reg_encode()));

  __ set_inst_mark();
  AddressLiteral addrlit(-1);
  __ JUMP(addrlit, G3, 0);

  __ delayed()->nop();

  assert(__ pc() - base <= to_interp_stub_size(), "wrong stub size");

  // Update current stubs pointer and restore code_end.
  __ end_a_stub();
  return base;
}
Example No. 5
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
  if (jvmci_reg < RegisterImpl::number_of_registers) {
    return as_Register(jvmci_reg)->as_VMReg();
  } else {
    jint floatRegisterNumber = jvmci_reg - RegisterImpl::number_of_registers;
    if (floatRegisterNumber < XMMRegisterImpl::number_of_registers) {
      return as_XMMRegister(floatRegisterNumber)->as_VMReg();
    }
    JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
  }
}
// convert Graal register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint graal_reg) {
  if (graal_reg < RegisterImpl::number_of_registers) {
    return as_Register(graal_reg)->as_VMReg();
  } else {
    jint floatRegisterNumber = graal_reg - RegisterImpl::number_of_registers;
    if (floatRegisterNumber < XMMRegisterImpl::number_of_registers) {
      return as_XMMRegister(floatRegisterNumber)->as_VMReg();
    }
    ShouldNotReachHere();
    return NULL;
  }
}
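
Both variants above split one flat JVMCI/Graal index space into general-purpose registers followed by XMM registers. A standalone sketch of that partitioning; the counts (16 of each, as on x86-64 without AVX-512) and all names below are illustrative assumptions, not values read from HotSpot:

#include <cstdio>

enum class RegKind { Gpr, Xmm, Invalid };

static RegKind classify(int reg, int& local_index, int ngpr = 16, int nxmm = 16) {
  if (reg < ngpr) { local_index = reg; return RegKind::Gpr; }   // general-purpose range
  int f = reg - ngpr;
  if (f < nxmm)   { local_index = f;   return RegKind::Xmm; }   // float (XMM) range
  local_index = -1;
  return RegKind::Invalid;                                      // real code reports an error here
}

int main() {
  int idx;
  classify(3, idx);                       // index 3  -> GPR 3
  classify(18, idx);                      // index 18 -> XMM 2
  std::printf("reg 18 -> xmm%d\n", idx);
  return 0;
}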
Example No. 7
static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registers = true) {
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      __ ld_ptr(SP, (cpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }

  if (restore_fpu_registers) {
    for (int i = 0; i < FrameMap::nof_fpu_regs; i++) {
      FloatRegister r = as_FloatRegister(i);
      __ ldf(FloatRegisterImpl::S, SP, (fpu_reg_save_offsets[i] * BytesPerWord) + STACK_BIAS, r);
    }
  }
}
Example No. 8
void C1_MacroAssembler::initialize_body(Register objectFields, Register len_in_bytes, Register Rzero) {
  Label done;
  assert_different_registers(objectFields, len_in_bytes, Rzero);

  // Initialize object fields.
  // See documentation for MVCLE instruction!!!
  assert(objectFields->encoding()%2==0, "objectFields must be an even register");
  assert(len_in_bytes->encoding() == (objectFields->encoding()+1), "objectFields and len_in_bytes must be a register pair");
  assert(Rzero->encoding()%2==1, "Rzero must be an odd register");

  // Use Rzero as src length, then mvcle will copy nothing
  // and fill the object with the padding value 0.
  move_long_ext(objectFields, as_Register(Rzero->encoding()-1), 0);
  bind(done);
}
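
The asserts above encode the s390 MVCLE operand constraints: the destination must be an even/odd register pair, and the (zero) source length must sit in an odd register whose even partner carries the source address. A standalone sketch of those encoding checks, with plain integers standing in for Register objects:

#include <cassert>

struct Reg { int encoding; };  // stand-in for a hardware register encoding

static bool valid_mvcle_operands(Reg dst, Reg dst_len, Reg src_len) {
  bool dst_even    = (dst.encoding % 2) == 0;                // destination pair starts even
  bool dst_is_pair = dst_len.encoding == dst.encoding + 1;   // length register completes the pair
  bool src_len_odd = (src_len.encoding % 2) == 1;            // source length register is odd
  return dst_even && dst_is_pair && src_len_odd;
}

int main() {
  assert(valid_mvcle_operands(Reg{2}, Reg{3}, Reg{5}));
  assert(!valid_mvcle_operands(Reg{3}, Reg{4}, Reg{5}));  // odd destination is rejected
  return 0;
}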
Example No. 9
void Runtime1::initialize_pd() {
  // compute word offsets from SP at which live (non-windowed) registers are captured by stub routines
  //
  // A stub routine will have a frame that is at least large enough to hold
  // a register window save area (obviously) and the volatile g registers
  // and floating registers. A user of save_live_registers can have a frame
  // that has more scratch area in it (although typically they will use L-regs).
  // In that case the frame will look like this (stack growing down):
  //
  // FP -> |             |
  //       | scratch mem |
  //       |   "      "  |
  //       --------------
  //       | float regs  |
  //       |   "    "    |
  //       ---------------
  //       | G regs      |
  //       | "  "        |
  //       ---------------
  //       | abi reg.    |
  //       | window save |
  //       | area        |
  // SP -> ---------------
  //
  int i;
  int sp_offset = round_to(frame::register_save_words, 2); //  start doubleword aligned

  // only G int registers are saved explicitly; others are found in register windows
  for (i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r == G1 || r == G3 || r == G4 || r == G5) {
      cpu_reg_save_offsets[i] = sp_offset;
      sp_offset++;
    }
  }

  // all float registers are saved explicitly
  assert(FrameMap::nof_fpu_regs == 32, "double registers not handled here");
  for (i = 0; i < FrameMap::nof_fpu_regs; i++) {
    fpu_reg_save_offsets[i] = sp_offset;
    sp_offset++;
  }
  reg_save_size_in_words = sp_offset - frame::memory_parameter_word_sp_offset;
  // this should match assembler::total_frame_size_in_bytes, which
  // isn't callable from this context.  It's checked by an assert when
  // it's used though.
  frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
}
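
A standalone sketch of the offset bookkeeping this routine performs: start doubleword aligned after the register window save area, give each saved G register and each of the 32 float registers one word, then round the frame size up to 8 bytes. The constants below (save area size, word size) are assumptions for illustration, not HotSpot values:

#include <cassert>

static int round_to(int x, int m)      { return ((x + m - 1) / m) * m; }
static int align_size_up(int x, int a) { return ((x + a - 1) / a) * a; }

int main() {
  const int register_save_words = 16;  // assumption: ABI register window save area, in words
  const int wordSize = 8;              // assumption: 64-bit words
  const int saved_g_regs = 4;          // G1, G3, G4, G5
  const int saved_f_regs = 32;         // all single-precision float registers

  int sp_offset = round_to(register_save_words, 2);  // start doubleword aligned
  sp_offset += saved_g_regs;                         // one word per saved G register
  sp_offset += saved_f_regs;                         // one word per float register
  int frame_size_in_bytes = align_size_up(sp_offset * wordSize, 8);
  assert(frame_size_in_bytes % 8 == 0);
  return 0;
}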
Example No. 10
void C1_MacroAssembler::invalidate_registers(Register preserve1,
                                             Register preserve2,
                                             Register preserve3) {
  Register dead_value = noreg;
  for (int i = 0; i < FrameMap::nof_cpu_regs; i++) {
    Register r = as_Register(i);
    if (r != preserve1 && r != preserve2 && r != preserve3 && r != Z_SP && r != Z_thread) {
      if (dead_value == noreg) {
        load_const_optimized(r, 0xc1dead);
        dead_value = r;
      } else {
        z_lgr(r, dead_value);
      }
    }
  }
}
Example No. 11
// convert JVMCI register indices (as used in oop maps) to HotSpot registers
VMReg CodeInstaller::get_hotspot_reg(jint jvmci_reg, TRAPS) {
  // JVMCI Registers are numbered as follows:
  //   0..31: Thirty-two General Purpose registers (CPU Registers)
  //   32..63: Thirty-two single precision float registers
  //   64..95: Thirty-two double precision float registers
  //   96..111: Sixteen quad precision float registers
  if (jvmci_reg < 32) {
    return as_Register(jvmci_reg)->as_VMReg();
  } else {
    jint floatRegisterNumber;
    if(jvmci_reg < 64) { // Single precision
      floatRegisterNumber = jvmci_reg - 32;
    } else if(jvmci_reg < 96) {
      floatRegisterNumber = 2 * (jvmci_reg - 64);
    } else if(jvmci_reg < 112) {
      floatRegisterNumber = 4 * (jvmci_reg - 96);
    } else {
      JVMCI_ERROR_NULL("invalid register number: %d", jvmci_reg);
    }
    return as_FloatRegister(floatRegisterNumber)->as_VMReg();
  }
}
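
The float branch above renumbers single, double and quad precision indices onto the flat FloatRegister numbering by scaling. A standalone sketch of that mapping, returning -1 where the real code raises a JVMCI error:

#include <cassert>

static int jvmci_to_float_number(int jvmci_reg) {
  if (jvmci_reg < 32)  return -1;                    // general-purpose range, handled elsewhere
  if (jvmci_reg < 64)  return jvmci_reg - 32;        // single precision: 1:1
  if (jvmci_reg < 96)  return 2 * (jvmci_reg - 64);  // double precision: even numbers
  if (jvmci_reg < 112) return 4 * (jvmci_reg - 96);  // quad precision: multiples of four
  return -1;                                         // invalid register number
}

int main() {
  assert(jvmci_to_float_number(33)  == 1);   // second single -> F1
  assert(jvmci_to_float_number(65)  == 2);   // second double -> F2
  assert(jvmci_to_float_number(97)  == 4);   // second quad   -> F4
  assert(jvmci_to_float_number(200) == -1);  // out of range
  return 0;
}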
Example No. 12
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
  // Stub is fixed up when the corresponding call is converted from calling
  // compiled code to calling interpreted code.
  if (mark == NULL) {
    // Get the mark within main instrs section which is set to the address of the call.
    mark = cbuf.insts_mark();
  }
  assert(mark != NULL, "mark must not be NULL");

  // Note that the code buffer's insts_mark is always relative to insts.
  // That's why we must use the macroassembler to generate a stub.
  MacroAssembler _masm(&cbuf);

  address stub = __ start_a_stub(Compile::MAX_stubs_size);
  if (stub == NULL) {
    return NULL;  // CodeBuffer::expand failed.
  }
  __ relocate(static_stub_Relocation::spec(mark));

  AddressLiteral meta = __ allocate_metadata_address(NULL);
  bool success = __ load_const_from_toc(as_Register(Matcher::inline_cache_reg_encode()), meta);

  __ set_inst_mark();
  AddressLiteral a((address)-1);
  success = success && __ load_const_from_toc(Z_R1, a);
  if (!success) {
    return NULL;  // CodeCache is full.
  }

  __ z_br(Z_R1);
  __ end_a_stub(); // Update current stubs pointer and restore insts_end.
  return stub;
#else
  ShouldNotReachHere();
#endif
}
Example No. 13
address CompiledStaticCall::emit_to_interp_stub(CodeBuffer &cbuf, address mark/* = NULL*/) {
#ifdef COMPILER2
    if (mark == NULL) {
        // Get the mark within main instrs section which is set to the address of the call.
        mark = cbuf.insts_mark();
    }

    // Note that the code buffer's insts_mark is always relative to insts.
    // That's why we must use the macroassembler to generate a stub.
    MacroAssembler _masm(&cbuf);

    // Start the stub.
    address stub = __ start_a_stub(CompiledStaticCall::to_interp_stub_size());
    if (stub == NULL) {
        return NULL; // CodeCache is full
    }

    // For java_to_interp stubs we use R11_scratch1 as scratch register
    // and in call trampoline stubs we use R12_scratch2. This way we
    // can distinguish them (see is_NativeCallTrampolineStub_at()).
    Register reg_scratch = R11_scratch1;

    // Create a static stub relocation which relates this stub
    // with the call instruction at insts_call_instruction_offset in the
    // instructions code-section.
    __ relocate(static_stub_Relocation::spec(mark));
    const int stub_start_offset = __ offset();

    // Now, create the stub's code:
    // - load the TOC
    // - load the inline cache oop from the constant pool
    // - load the call target from the constant pool
    // - call
    __ calculate_address_from_global_toc(reg_scratch, __ method_toc());
    AddressLiteral ic = __ allocate_metadata_address((Metadata *)NULL);
    bool success = __ load_const_from_method_toc(as_Register(Matcher::inline_cache_reg_encode()),
                   ic, reg_scratch, /*fixed_size*/ true);
    if (!success) {
        return NULL; // CodeCache is full
    }

    if (ReoptimizeCallSequences) {
        __ b64_patchable((address)-1, relocInfo::none);
    } else {
        AddressLiteral a((address)-1);
        success = __ load_const_from_method_toc(reg_scratch, a, reg_scratch, /*fixed_size*/ true);
        if (!success) {
            return NULL; // CodeCache is full
        }
        __ mtctr(reg_scratch);
        __ bctr();
    }

    // FIXME: Assert that the stub can be identified and patched.

    // Java_to_interp_stub_size should be good.
    assert((__ offset() - stub_start_offset) <= CompiledStaticCall::to_interp_stub_size(),
           "should be good size");
    assert(!is_NativeCallTrampolineStub_at(__ addr_at(stub_start_offset)),
           "must not confuse java_to_interp with trampoline stubs");

    // End the stub.
    __ end_a_stub();
    return stub;
#else
    ShouldNotReachHere();
    return NULL;
#endif
}
Example No. 14
void trace_method_handle_stub(const char* adaptername,
                              oop mh,
                              intptr_t* saved_regs,
                              intptr_t* entry_sp) {
  // called as a leaf from native code: do not block the JVM!
  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "rcx_mh" : "rcx";
  tty->print_cr("MH %s %s=" PTR_FORMAT " sp=" PTR_FORMAT,
                adaptername, mh_reg_name,
                mh, entry_sp);

  if (Verbose) {
    tty->print_cr("Registers:");
    const int saved_regs_count = RegisterImpl::number_of_registers;
    for (int i = 0; i < saved_regs_count; i++) {
      Register r = as_Register(i);
      // The registers are stored in reverse order on the stack (by pusha).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[((saved_regs_count - 1) - i)]);
      if ((i + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // safely create a frame and call frame::describe
      intptr_t *dump_sp = trace_calling_frame.sender_sp();
      intptr_t *dump_fp = trace_calling_frame.link();

      bool walkable = has_mh; // whether the traced frame should be walkable

      if (walkable) {
        // The previous definition of walkable may have to be refined
        // if new call sites cause the next frame constructor to start
        // failing. Alternatively, frame constructors could be
        // modified to support the current or future non walkable
        // frames (but this is more intrusive and is not considered as
        // part of this RFE, which will instead use a simpler output).
        frame dump_frame = frame(dump_sp, dump_fp);
        dump_frame.describe(values, 1);
      } else {
        // Stack may not be walkable (invalid PC above FP):
        // Add descriptions without building a Java frame to avoid issues
        values.describe(-1, dump_fp, "fp for #1 <not parsed, cannot trust pc>");
        values.describe(-1, dump_sp, "sp for #1");
      }
      values.describe(-1, entry_sp, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }
    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}
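
The register dump loop above reads the pusha save area back to front because pusha stores the first-pushed register at the highest address. A standalone sketch of that reverse indexing over an illustrative plain array (the values are made up):

#include <cstdio>

int main() {
  const int saved_regs_count = 8;
  // Pretend these were pushed last-to-first, so register i sits at the mirrored index.
  long saved_regs[saved_regs_count] = {70, 60, 50, 40, 30, 20, 10, 0};
  for (int i = 0; i < saved_regs_count; i++) {
    std::printf("reg%-2d=%ld%s", i, saved_regs[(saved_regs_count - 1) - i],
                ((i + 1) % 4 == 0) ? "\n" : ", ");
  }
  return 0;
}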
Example No. 15
 // derived registers, offsets, and addresses
 Register successor() const
 {
   return as_Register(encoding() + 1);
 }
Example No. 16
  const char* name() const;

  // testers
  bool is_valid() const                     { return 0 <= value() && value() < number_of_registers; }

  bool is_stacked() const                   { return value() >= 32 && value() <= 127; }
  bool is_callee_saved() const              { return value() >= 4 && value() <= 7;    }
  bool is_caller_saved() const              { return !is_stacked() && !is_callee_saved(); }

  // derived registers, offsets, and addresses
  Register successor() const                { return as_Register(encoding() + 1); }
};

// The general registers of the IA64 architecture

const Register gnoreg = as_Register(-1);
const Register noreg  = gnoreg;

const Register GR0    = as_Register(0);
const Register GR1    = as_Register(1);
const Register GR2    = as_Register(2);
const Register GR3    = as_Register(3);
const Register GR4    = as_Register(4);
const Register GR5    = as_Register(5);
const Register GR6    = as_Register(6);
const Register GR7    = as_Register(7);
const Register GR8    = as_Register(8);
const Register GR9    = as_Register(9);
const Register GR10   = as_Register(10);
const Register GR11   = as_Register(11);
const Register GR12   = as_Register(12);
Example No. 17
  address generate_call_stub(address& return_address)
  {
    assert (!TaggedStackInterpreter, "not supported");
    
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ enter();

    const Register call_wrapper    = r3;
    const Register result          = r4;
    const Register result_type     = r5;
    const Register method          = r6;
    const Register entry_point     = r7;
    const Register parameters      = r8;
    const Register parameter_words = r9;
    const Register thread          = r10;

#ifdef ASSERT
    // Make sure we have no pending exceptions
    {
      StackFrame frame;
      Label label;

      __ load (r0, Address(thread, Thread::pending_exception_offset()));
      __ compare (r0, 0);
      __ beq (label);
      __ prolog (frame);
      __ should_not_reach_here (__FILE__, __LINE__);
      __ epilog (frame);
      __ blr ();
      __ bind (label);
    }
#endif // ASSERT

    // Calculate the frame size
    StackFrame frame;
    for (int i = 0; i < StackFrame::max_crfs; i++)
      frame.get_cr_field();
    for (int i = 0; i < StackFrame::max_gprs; i++)
      frame.get_register();
    StubRoutines::set_call_stub_base_size(frame.unaligned_size() + 3*wordSize);
    // the 3 extra words are for call_wrapper, result and result_type

    const Register parameter_bytes = parameter_words;

    __ shift_left (parameter_bytes, parameter_words, LogBytesPerWord);    

    const Register frame_size = r11;
    const Register padding    = r12;

    __ addi (frame_size, parameter_bytes, StubRoutines::call_stub_base_size());
    __ calc_padding_for_alignment (padding, frame_size, StackAlignmentInBytes);
    __ add (frame_size, frame_size, padding);

    // Save the link register and create the new frame
    __ mflr (r0);
    __ store (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
    __ neg (r0, frame_size);
    __ store_update_indexed (r1, r1, r0);
#ifdef PPC64
    __ mfcr (r0);
    __ store (r0, Address(r1, StackFrame::cr_save_offset * wordSize));
#endif // PPC64

    // Calculate the address of the interpreter's local variables
    const Register locals = frame_size;

    __ addi (locals, r1, frame.start_of_locals() - wordSize);
    __ add (locals, locals, padding);
    __ add (locals, locals, parameter_bytes);

    // Store the call wrapper address and the result stuff
    const int initial_offset = 1;
    int offset = initial_offset;

    __ store (call_wrapper, Address(locals, offset++ * wordSize));
    __ store (result,       Address(locals, offset++ * wordSize));
    __ store (result_type,  Address(locals, offset++ * wordSize));

    // Store the registers
#ifdef PPC32
    __ mfcr (r0);
    __ store (r0, Address(locals, offset++ * wordSize));
#endif // PPC32
    for (int i = 14; i < 32; i++) {
      __ store (as_Register(i), Address(locals, offset++ * wordSize));
    }
    const int final_offset = offset;

    // Store the location of call_wrapper
    frame::set_call_wrapper_offset((final_offset - initial_offset) * wordSize);

#ifdef ASSERT
    // Check that we wrote all the way to the end of the frame.
    // The frame may have been resized when we return from the
    // interpreter, so the start of the frame may have moved
    // but the end will be where we left it and we rely on this
    // to find our stuff.
    {
      StackFrame frame;
      Label label;

      __ load (r3, Address(r1, 0));
      __ subi (r3, r3, final_offset * wordSize);
      __ compare (r3, locals);
      __ beq (label);
      __ prolog (frame);
      __ should_not_reach_here (__FILE__, __LINE__);
      __ epilog (frame);
      __ blr ();
      __ bind (label);
    }
#endif // ASSERT

    // Pass parameters if any
    {
      Label loop, done;

      __ compare (parameter_bytes, 0);
      __ ble (done);

      const Register src = parameters;
      const Register dst = padding;

      __ mr (dst, locals);
      __ shift_right (r0, parameter_bytes, LogBytesPerWord);      
      __ mtctr (r0);
      __ bind (loop);
      __ load (r0, Address(src, 0));
      __ store (r0, Address(dst, 0));
      __ addi (src, src, wordSize);
      __ subi (dst, dst, wordSize);
      __ bdnz (loop);

      __ bind (done);
    }

    // Make the call
    __ mr (Rmethod, method);
    __ mr (Rlocals, locals);
    __ mr (Rthread, thread);
    __ mtctr (entry_point);
    __ bctrl();

    // This is used to identify call_stub stack frames
    return_address = __ pc();

    // Figure out where our stuff is stored
    __ load (locals, Address(r1, 0));
    __ subi (locals, locals, final_offset * wordSize);

#ifdef ASSERT
    // Rlocals should contain the address we just calculated.
    {
      StackFrame frame;
      Label label;

      __ compare (Rlocals, locals);
      __ beq (label);
      __ prolog (frame);
      __ should_not_reach_here (__FILE__, __LINE__);
      __ epilog (frame);
      __ blr ();
      __ bind (label);
    }
#endif // ASSERT
 
    // Is an exception being thrown?
    Label exit;

    __ load (r0, Address(Rthread, Thread::pending_exception_offset()));
    __ compare (r0, 0);
    __ bne (exit);

    // Store result depending on type
    const Register result_addr = r6;

    Label is_int, is_long, is_object;

    offset = initial_offset + 1; // skip call_wrapper
    __ load (result_addr, Address(locals, offset++ * wordSize));
    __ load (result_type, Address(locals, offset++ * wordSize));
    __ compare (result_type, T_INT);
    __ beq (is_int);
    __ compare (result_type, T_LONG);
    __ beq (is_long);
    __ compare (result_type, T_OBJECT);
    __ beq (is_object);
    
    __ should_not_reach_here (__FILE__, __LINE__);

    __ bind (is_int);
    __ stw (r3, Address(result_addr, 0));
    __ b (exit);
    
    __ bind (is_long);
#ifdef PPC32
    __ store (r4, Address(result_addr, wordSize));
#endif
    __ store (r3, Address(result_addr, 0));
    __ b (exit);
    
    __ bind (is_object);
    __ store (r3, Address(result_addr, 0));
    //__ b (exit);

    // Restore the registers
    __ bind (exit);
#ifdef PPC32
    __ load (r0, Address(locals, offset++ * wordSize));
    __ mtcr (r0);
#endif // PPC32
    for (int i = 14; i < 32; i++) {
      __ load (as_Register(i), Address(locals, offset++ * wordSize));
    }
#ifdef PPC64
    __ load (r0, Address(r1, StackFrame::cr_save_offset * wordSize));
    __ mtcr (r0);
#endif // PPC64
    assert (offset == final_offset, "save and restore must match");

    // Unwind and return
    __ load (r1, Address(r1, StackFrame::back_chain_offset * wordSize));
    __ load (r0, Address(r1, StackFrame::lr_save_offset * wordSize));
    __ mtlr (r0);
    __ blr ();
    
    return start;
  }
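
The stub above pads its dynamically sized frame up to StackAlignmentInBytes before allocating it. A standalone sketch of that padding computation, with the alignment value assumed rather than taken from HotSpot:

#include <cassert>

static int calc_padding_for_alignment(int size, int alignment) {
  int remainder = size % alignment;
  return remainder == 0 ? 0 : alignment - remainder;  // bytes needed to reach the next boundary
}

int main() {
  const int StackAlignmentInBytes = 16;  // assumption
  int frame_size = 13 * 8;               // hypothetical base size plus parameter bytes
  int padding = calc_padding_for_alignment(frame_size, StackAlignmentInBytes);
  assert((frame_size + padding) % StackAlignmentInBytes == 0);
  return 0;
}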
Example No. 18
void trace_method_handle_stub(const char* adaptername,
                              oopDesc* mh,
                              intptr_t* entry_sp,
                              intptr_t* saved_regs) {

  bool has_mh = (strstr(adaptername, "/static") == NULL &&
                 strstr(adaptername, "linkTo") == NULL);    // static linkers don't have MH
  const char* mh_reg_name = has_mh ? "R23_method_handle" : "G23";
  tty->print_cr("MH %s %s=" INTPTR_FORMAT " sp=" INTPTR_FORMAT,
                adaptername, mh_reg_name, p2i(mh), p2i(entry_sp));

  if (Verbose) {
    tty->print_cr("Registers:");
    const int abi_offset = frame::abi_reg_args_size / 8;
    for (int i = R3->encoding(); i <= R12->encoding(); i++) {
      Register r = as_Register(i);
      int count = i - R3->encoding();
      // The registers are stored in reverse order on the stack (by save_volatile_gprs(R1_SP, abi_reg_args_size)).
      tty->print("%3s=" PTR_FORMAT, r->name(), saved_regs[abi_offset + count]);
      if ((count + 1) % 4 == 0) {
        tty->cr();
      } else {
        tty->print(", ");
      }
    }
    tty->cr();

    {
      // dumping last frame with frame::describe

      JavaThread* p = JavaThread::active();

      ResourceMark rm;
      PRESERVE_EXCEPTION_MARK; // may not be needed, but safer and inexpensive here
      FrameValues values;

      // Note: We want to allow trace_method_handle from any call site.
      // While trace_method_handle creates a frame, it may be entered
      // without a PC on the stack top (e.g. not just after a call).
      // Walking that frame could lead to failures due to that invalid PC.
      // => carefully detect that frame when doing the stack walking

      // Current C frame
      frame cur_frame = os::current_frame();

      // Robust search of trace_calling_frame (independent of inlining).
      // Assumes saved_regs comes from a pusha in the trace_calling_frame.
      assert(cur_frame.sp() < saved_regs, "registers not saved on stack ?");
      frame trace_calling_frame = os::get_sender_for_C_frame(&cur_frame);
      while (trace_calling_frame.fp() < saved_regs) {
        trace_calling_frame = os::get_sender_for_C_frame(&trace_calling_frame);
      }

      // Safely create a frame and call frame::describe.
      intptr_t *dump_sp = trace_calling_frame.sender_sp();

      frame dump_frame = frame(dump_sp);
      dump_frame.describe(values, 1);

      values.describe(-1, saved_regs, "raw top of stack");

      tty->print_cr("Stack layout:");
      values.print(p);
    }

    if (has_mh && mh->is_oop()) {
      mh->print();
      if (java_lang_invoke_MethodHandle::is_instance(mh)) {
        if (java_lang_invoke_MethodHandle::form_offset_in_bytes() != 0)
          java_lang_invoke_MethodHandle::form(mh)->print();
      }
    }
  }
}
Example No. 19
 Register after_save() const {
   assert(is_out() || is_global(), "register not visible after save");
   return is_out() ? as_Register(encoding() + (ibase - obase)) : (const Register)this;
 }
address AbstractInterpreterGenerator::generate_slow_signature_handler() {
  address entry = __ pc();

  __ andr(esp, esp, -16);
  __ mov(c_rarg3, esp);
  // rmethod
  // rlocals
  // c_rarg3: first stack arg - wordSize

  // adjust sp
  __ sub(sp, c_rarg3, 18 * wordSize);
  __ str(lr, Address(__ pre(sp, -2 * wordSize)));
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::slow_signature_handler),
             rmethod, rlocals, c_rarg3);

  // r0: result handler

  // Stack layout:
  // rsp: return address           <- sp
  //      1 garbage
  //      8 integer args (if static first is unused)
  //      1 float/double identifiers
  //      8 double args
  //        stack args              <- esp
  //        garbage
  //        expression stack bottom
  //        bcp (NULL)
  //        ...

  // Restore LR
  __ ldr(lr, Address(__ post(sp, 2 * wordSize)));

  // Do FP first so we can use c_rarg3 as temp
  __ ldrw(c_rarg3, Address(sp, 9 * wordSize)); // float/double identifiers

  for (int i = 0; i < Argument::n_float_register_parameters_c; i++) {
    const FloatRegister r = as_FloatRegister(i);

    Label d, done;

    __ tbnz(c_rarg3, i, d);
    __ ldrs(r, Address(sp, (10 + i) * wordSize));
    __ b(done);
    __ bind(d);
    __ ldrd(r, Address(sp, (10 + i) * wordSize));
    __ bind(done);
  }

  // c_rarg0 contains the result from the call of
  // InterpreterRuntime::slow_signature_handler so we don't touch it
  // here.  It will be loaded with the JNIEnv* later.
  __ ldr(c_rarg1, Address(sp, 1 * wordSize));
  for (int i = c_rarg2->encoding(); i <= c_rarg7->encoding(); i += 2) {
    Register rm = as_Register(i), rn = as_Register(i+1);
    __ ldp(rm, rn, Address(sp, i * wordSize));
  }

  __ add(sp, sp, 18 * wordSize);
  __ ret(lr);

  return entry;
}
Example No. 21
 Register after_restore() const {
   assert(is_in() || is_global(), "register not visible after restore");
   return is_in() ? as_Register(encoding() + (obase - ibase)) : (const Register)this;
 }
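
After a SPARC save the caller's O registers are visible as the callee's I registers, so after_save() shifts an out register's encoding by (ibase - obase) and after_restore() applies the inverse shift. A standalone sketch of that window rotation arithmetic, with the conventional encodings (O0..O7 = 8..15, I0..I7 = 24..31) taken as assumptions:

#include <cassert>

static const int obase = 8, ibase = 24;  // assumed encodings: O0..O7 = 8..15, I0..I7 = 24..31

static int after_save(int enc)    { return (enc >= obase && enc < obase + 8) ? enc + (ibase - obase) : enc; }
static int after_restore(int enc) { return (enc >= ibase && enc < ibase + 8) ? enc + (obase - ibase) : enc; }

int main() {
  assert(after_save(8)     == 24);  // O0 is addressed as I0 after save
  assert(after_restore(24) == 8);   // I0 maps back to O0 after restore
  assert(after_save(1)     == 1);   // globals keep their encoding
  return 0;
}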