address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address esp_after_call(ebp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (ebp,  9 * wordSize); // same as in generate_call_stub()!
    address start = __ pc();

    // get thread directly
    __ movl(ecx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(ebx);
      __ cmpl(ebx, ecx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(eax);
    __ movl(Address(ecx, Thread::pending_exception_offset()), eax          );
    __ movl(Address(ecx, Thread::exception_file_offset   ()), (int)__FILE__);
    __ movl(Address(ecx, Thread::exception_line_offset   ()),      __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jmp(StubRoutines::_call_stub_return_address, relocInfo::none);

    return start;
  }
void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) {
    __ leal(temp(), Address(from(), from_offset * wordSize));
    __ cmpl(Address(from(), from_offset * wordSize), 0); // do not use temp() to avoid AGI
    Label L;
    __ jcc(Assembler::notZero, L);
    __ movl(temp(), 0);
    __ bind(L);
    __ movl(Address(to(), to_offset * wordSize), temp());
}
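// A minimal C++ sketch (names are illustrative, not the generator's API) of the
// boxing rule the stub above emits: a JNI reference argument is passed as the
// address of the caller's local slot, or as NULL when that slot holds a null oop.
#include <cstdint>

typedef void* jobject_like;   // stand-in for jobject to keep the sketch self-contained

static jobject_like box_local(intptr_t* local_slot) {
  return (*local_slot != 0) ? reinterpret_cast<jobject_like>(local_slot)
                            : static_cast<jobject_like>(0);
}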
void CompilerStubs::generate_compiler_new_object() {
  comment_section("Compiler new object (any size)");
  comment("Register edx holds the instance size, register ebx holds the prototypical near of the instance class");
  Label slow_case;
  entry("compiler_new_object");

  comment("Get _inline_allocation_top");
  movl(eax, Address(Constant("_inline_allocation_top")));

  comment("Compute new top");
  leal(ecx, Address(eax, edx, times_1));

  if (GenerateDebugAssembly) {
    comment("Check ExcessiveGC");
    cmpl(Address(Constant("ExcessiveGC")), Constant(0)); // take the slow case whenever ExcessiveGC is set
    jcc(not_zero, Constant(slow_case));
  }

  comment("Compare against _inline_allocation_end");
  cmpl(ecx, Address(Constant("_inline_allocation_end")));
  jcc(above, Constant(slow_case));

  comment("Allocation succeeded, set _inline_allocation_top");
  movl(Address(Constant("_inline_allocation_top")), ecx);

  comment("Set prototypical near in object; no need for write barrier");
  movl(Address(eax), ebx);

  comment("Compute remaining size");
  decrement(edx, oopSize);

  comment("One-word object?");
  Label init_done;
  jcc(zero, Constant(init_done));

  comment("Zero object fields");
  xorl(ecx, ecx);
  Label init_loop;
  bind(init_loop);
  movl(Address(eax, edx, times_1), ecx);
  decrement(edx, oopSize);
  jcc(not_zero, Constant(init_loop));
  bind(init_done);

  comment("The newly allocated object is in register eax");
  ret();

  comment("Slow case - call the VM runtime system");
  bind(slow_case);
  leal(eax, Address(Constant("newobject")));
  goto_shared_call_vm(T_OBJECT);

  entry_end(); // compiler_new_object
}
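// A hedged C++ sketch of the fast path generated above: bump-pointer allocation
// against _inline_allocation_top/_inline_allocation_end, with the prototypical near
// stored in the first word and the remaining words zeroed. The globals and the
// slow-path hook below are illustrative stand-ins, not the VM's actual symbols.
#include <cstddef>
#include <cstring>

static char* inline_allocation_top;
static char* inline_allocation_end;
void* slow_path_new_object(void* prototypical_near, size_t size);    // assumed VM runtime fallback

static void* compiler_new_object_sketch(void* prototypical_near, size_t size_in_bytes) {
  char* obj     = inline_allocation_top;
  char* new_top = obj + size_in_bytes;
  if (new_top > inline_allocation_end) {
    return slow_path_new_object(prototypical_near, size_in_bytes);   // "slow_case"
  }
  inline_allocation_top = new_top;                                    // commit the allocation
  *reinterpret_cast<void**>(obj) = prototypical_near;                 // set near; no write barrier
  std::memset(obj + sizeof(void*), 0, size_in_bytes - sizeof(void*)); // zero object fields
  return obj;
}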
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(ecx);
      __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into ebx
    __ movl(eax, Address(esp));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
    __ movl(ebx, eax);

    // setup eax & edx, remove return address & clear pending exception
    __ get_thread(ecx);
    __ popl(edx);
    __ movl(eax, Address(ecx, Thread::pending_exception_offset()));
    __ movl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testl(eax, eax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // eax: exception
    // ebx: exception handler
    // edx: throwing pc
    __ verify_oop(eax);
    __ jmp(ebx);

    return start;
  }
void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movptr(rcx, Address(rcx, Method::const_offset()));
  load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpl(rcx, T_INT);
  jcc(Assembler::equal, done);

  // mask integer result to narrower return type.
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);)      // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);)      // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // cmpl(rcx, T_SHORT);  // all that's left
  // jcc(Assembler::notEqual, done);
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);)      // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);)      // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}
void CompilerStubs::generate_compiler_idiv_irem() {
  comment_section("Compiler integer divide and remainder");
  comment("Register eax holds the dividend, register ebx holds the divisor");

  entry("compiler_idiv_irem");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  Label throw_exception;

  testl(ebx,ebx);
  jcc(equal, Constant(throw_exception));

  // Check for special case
  cmpl(eax, Constant(min_int));
  jcc(not_equal, Constant(normal_case));
  xorl(edx, edx); // Prepare edx for possible special case (where remainder = 0)
  cmpl(ebx, Constant(-1));
  jcc(equal, Constant(special_case));

  // Handle normal case
  bind(normal_case);
  cdql();
  idivl(ebx);

  // Normal and special case exit
  bind(special_case);
  ret();

  bind(throw_exception);
  comment("Throw a DivisionByZeroException");
  leal(eax, Address(Constant("division_by_zero_exception")));
  goto_shared_call_vm(T_VOID);

  entry_end(); // compiler_idiv_irem
}
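// A hedged C++ sketch of the semantics the stub above implements. On x86, idiv
// raises #DE both for a zero divisor and for INT_MIN / -1 (quotient overflow),
// so the stub throws for the former and special-cases the latter before idiv.
#include <climits>
#include <stdexcept>

static void idiv_irem_sketch(int dividend, int divisor, int& quotient, int& remainder) {
  if (divisor == 0) {
    throw std::runtime_error("division by zero");   // stub: throw_exception path
  }
  if (dividend == INT_MIN && divisor == -1) {       // stub: special_case path
    quotient  = INT_MIN;                            // Java semantics: result wraps to INT_MIN
    remainder = 0;
    return;
  }
  quotient  = dividend / divisor;                   // stub: cdq + idiv
  remainder = dividend % divisor;
}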
void InterpreterStubs::generate_interpreter_rethrow_exception() {
  comment_section("Interpreter rethrow exception");
  comment("Register eax holds the exception; Interpreter state is not in registers");

  entry("interpreter_rethrow_exception");
  comment("Restore bytecode and locals pointers");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  comment("Mark the bytecode pointer as being inside an exception");
  addl(esi, Constant(JavaFrame::exception_frame_flag));

  comment("Clear the expression stack");
  movl(esp, Address(ebp, Constant(JavaFrame::stack_bottom_pointer_offset())));
  
  comment("Push the exception on the expression stack");
  push_obj(eax);

  comment("Get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  
  comment("Check if we got a bci - otherwise unwind the activation");
  cmpl(eax, Constant(-1));
  jcc(equal, Constant("interpreter_unwind_activation"));
#if ENABLE_JAVA_DEBUGGER
  Label skip;
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(equal, Constant(skip));
  movl(edx, eax);
  interpreter_call_vm(Constant("handle_caught_exception"), T_VOID);
  comment("Re-get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  bind(skip);
#endif
  comment("Convert the bytecode index into a bytecode pointer");
  movl(ecx, Address(ebp, Constant(JavaFrame::method_offset())));
  leal(esi, Address(ecx, eax, times_1, Constant(Method::base_offset())));

  // Dispatch to the exception handler.
  dispatch_next();

  entry_end(); // interpreter_rethrow_exception
}
void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmpl(temp, ref_kind);
  __ jcc(Assembler::equal, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}
void MacroAssembler::fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ecx, Register edx, Register tmp1, Register tmp2) {
  Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2;
  Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2;
  Label L_2TAG_PACKET_8_0_2;
  Label L_2TAG_PACKET_12_0_2, L_2TAG_PACKET_13_0_2, B1_3, B1_5, start;

  assert_different_registers(tmp1, tmp2, eax, ecx, edx);
  jmp(start);
  address L_tbl = (address)_L_tbl;
  address log2 = (address)_log2;
  address coeff = (address)_coeff;

  bind(start);
  subq(rsp, 24);
  movsd(Address(rsp, 0), xmm0);
  mov64(rax, 0x3ff0000000000000);
  movdq(xmm2, rax);
  mov64(rdx, 0x77f0000000000000);
  movdq(xmm3, rdx);
  movl(ecx, 32768);
  movdl(xmm4, rcx);
  mov64(tmp1, 0xffffe00000000000);
  movdq(xmm5, tmp1);
  movdqu(xmm1, xmm0);
  pextrw(eax, xmm0, 3);
  por(xmm0, xmm2);
  movl(ecx, 16352);
  psrlq(xmm0, 27);
  lea(tmp2, ExternalAddress(L_tbl));
  psrld(xmm0, 2);
  rcpps(xmm0, xmm0);
  psllq(xmm1, 12);
  pshufd(xmm6, xmm5, 228);
  psrlq(xmm1, 12);
  subl(eax, 16);
  cmpl(eax, 32736);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2);

  bind(L_2TAG_PACKET_1_0_2);
  paddd(xmm0, xmm4);
  por(xmm1, xmm3);
  movdl(edx, xmm0);
  psllq(xmm0, 29);
  pand(xmm5, xmm1);
  pand(xmm0, xmm6);
  subsd(xmm1, xmm5);
  mulpd(xmm5, xmm0);
  andl(eax, 32752);
  subl(eax, ecx);
  cvtsi2sdl(xmm7, eax);
  mulsd(xmm1, xmm0);
  movq(xmm6, ExternalAddress(log2));       // 0xfefa3800UL, 0x3fa62e42UL
  movdqu(xmm3, ExternalAddress(coeff));    // 0x92492492UL, 0x3fc24924UL, 0x00000000UL, 0xbfd00000UL
  subsd(xmm5, xmm2);
  andl(edx, 16711680);
  shrl(edx, 12);
  movdqu(xmm0, Address(tmp2, edx));
  movdqu(xmm4, ExternalAddress(16 + coeff)); // 0x3d6fb175UL, 0xbfc5555eUL, 0x55555555UL, 0x3fd55555UL
  addsd(xmm1, xmm5);
  movdqu(xmm2, ExternalAddress(32 + coeff)); // 0x9999999aUL, 0x3fc99999UL, 0x00000000UL, 0xbfe00000UL
  mulsd(xmm6, xmm7);
  movddup(xmm5, xmm1);
  mulsd(xmm7, ExternalAddress(8 + log2));    // 0x93c76730UL, 0x3ceef357UL
  mulsd(xmm3, xmm1);
  addsd(xmm0, xmm6);
  mulpd(xmm4, xmm5);
  mulpd(xmm5, xmm5);
  movddup(xmm6, xmm0);
  addsd(xmm0, xmm1);
  addpd(xmm4, xmm2);
  mulpd(xmm3, xmm5);
  subsd(xmm6, xmm0);
  mulsd(xmm4, xmm1);
  pshufd(xmm2, xmm0, 238);
  addsd(xmm1, xmm6);
  mulsd(xmm5, xmm5);
  addsd(xmm7, xmm2);
  addpd(xmm4, xmm3);
  addsd(xmm1, xmm7);
  mulpd(xmm4, xmm5);
  addsd(xmm1, xmm4);
  pshufd(xmm5, xmm4, 238);
  addsd(xmm1, xmm5);
  addsd(xmm0, xmm1);
  jmp(B1_5);

  bind(L_2TAG_PACKET_0_0_2);
  movq(xmm0, Address(rsp, 0));
  movq(xmm1, Address(rsp, 0));
  addl(eax, 16);
  cmpl(eax, 32768);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_2_0_2);
  cmpl(eax, 16);
  jcc(Assembler::below, L_2TAG_PACKET_3_0_2);

  bind(L_2TAG_PACKET_4_0_2);
  addsd(xmm0, xmm0);
  jmp(B1_5);

  bind(L_2TAG_PACKET_5_0_2);
  jcc(Assembler::above, L_2TAG_PACKET_4_0_2);
  cmpl(edx, 0);
  jcc(Assembler::above, L_2TAG_PACKET_4_0_2);
  jmp(L_2TAG_PACKET_6_0_2);

  bind(L_2TAG_PACKET_3_0_2);
  xorpd(xmm1, xmm1);
  addsd(xmm1, xmm0);
  movdl(edx, xmm1);
  psrlq(xmm1, 32);
  movdl(ecx, xmm1);
  orl(edx, ecx);
  cmpl(edx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_7_0_2);
  xorpd(xmm1, xmm1);
  movl(eax, 18416);
  pinsrw(xmm1, eax, 3);
  mulsd(xmm0, xmm1);
  movdqu(xmm1, xmm0);
  pextrw(eax, xmm0, 3);
  por(xmm0, xmm2);
  psrlq(xmm0, 27);
  movl(ecx, 18416);
  psrld(xmm0, 2);
  rcpps(xmm0, xmm0);
  psllq(xmm1, 12);
  pshufd(xmm6, xmm5, 228);
  psrlq(xmm1, 12);
  jmp(L_2TAG_PACKET_1_0_2);

  bind(L_2TAG_PACKET_2_0_2);
  movdl(edx, xmm1);
  psrlq(xmm1, 32);
  movdl(ecx, xmm1);
  addl(ecx, ecx);
  cmpl(ecx, -2097152);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_5_0_2);
  orl(edx, ecx);
  cmpl(edx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_7_0_2);

  bind(L_2TAG_PACKET_6_0_2);
  xorpd(xmm1, xmm1);
  xorpd(xmm0, xmm0);
  movl(eax, 32752);
  pinsrw(xmm1, eax, 3);
  mulsd(xmm0, xmm1);
  movl(Address(rsp, 16), 3);
  jmp(L_2TAG_PACKET_8_0_2);
  bind(L_2TAG_PACKET_7_0_2);
  xorpd(xmm1, xmm1);
  xorpd(xmm0, xmm0);
  movl(eax, 49136);
  pinsrw(xmm0, eax, 3);
  divsd(xmm0, xmm1);
  movl(Address(rsp, 16), 2);

  bind(L_2TAG_PACKET_8_0_2);
  movq(Address(rsp, 8), xmm0);

  bind(B1_3);
  movq(xmm0, Address(rsp, 8));

  bind(B1_5);
  addq(rsp, 24);
}
void MacroAssembler::fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ecx, Register edx, Register tmp) {
  Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2;
  Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2;
  Label L_2TAG_PACKET_8_0_2, L_2TAG_PACKET_9_0_2, L_2TAG_PACKET_10_0_2, L_2TAG_PACKET_11_0_2;
  Label L_2TAG_PACKET_12_0_2, L_2TAG_PACKET_13_0_2, B1_3, B1_5, start;

  assert_different_registers(tmp, eax, ecx, edx);
  jmp(start);
  address static_const_table = (address)_static_const_table;

  bind(start);
  subl(rsp, 120);
  movl(Address(rsp, 64), tmp);
  lea(tmp, ExternalAddress(static_const_table));
  movdqu(xmm0, Address(rsp, 128));
  unpcklpd(xmm0, xmm0);
  movdqu(xmm1, Address(tmp, 64));          // 0x652b82feUL, 0x40571547UL, 0x652b82feUL, 0x40571547UL
  movdqu(xmm6, Address(tmp, 48));          // 0x00000000UL, 0x43380000UL, 0x00000000UL, 0x43380000UL
  movdqu(xmm2, Address(tmp, 80));          // 0xfefa0000UL, 0x3f862e42UL, 0xfefa0000UL, 0x3f862e42UL
  movdqu(xmm3, Address(tmp, 96));          // 0xbc9e3b3aUL, 0x3d1cf79aUL, 0xbc9e3b3aUL, 0x3d1cf79aUL
  pextrw(eax, xmm0, 3);
  andl(eax, 32767);
  movl(edx, 16527);
  subl(edx, eax);
  subl(eax, 15504);
  orl(edx, eax);
  cmpl(edx, INT_MIN);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2);
  mulpd(xmm1, xmm0);
  addpd(xmm1, xmm6);
  movapd(xmm7, xmm1);
  subpd(xmm1, xmm6);
  mulpd(xmm2, xmm1);
  movdqu(xmm4, Address(tmp, 128));         // 0xe3289860UL, 0x3f56c15cUL, 0x555b9e25UL, 0x3fa55555UL
  mulpd(xmm3, xmm1);
  movdqu(xmm5, Address(tmp, 144));         // 0xc090cf0fUL, 0x3f811115UL, 0x55548ba1UL, 0x3fc55555UL
  subpd(xmm0, xmm2);
  movdl(eax, xmm7);
  movl(ecx, eax);
  andl(ecx, 63);
  shll(ecx, 4);
  sarl(eax, 6);
  movl(edx, eax);
  movdqu(xmm6, Address(tmp, 16));          // 0xffffffc0UL, 0x00000000UL, 0xffffffc0UL, 0x00000000UL
  pand(xmm7, xmm6);
  movdqu(xmm6, Address(tmp, 32));          // 0x0000ffc0UL, 0x00000000UL, 0x0000ffc0UL, 0x00000000UL
  paddq(xmm7, xmm6);
  psllq(xmm7, 46);
  subpd(xmm0, xmm3);
  movdqu(xmm2, Address(tmp, ecx, Address::times_1, 160));
  mulpd(xmm4, xmm0);
  movapd(xmm6, xmm0);
  movapd(xmm1, xmm0);
  mulpd(xmm6, xmm6);
  mulpd(xmm0, xmm6);
  addpd(xmm5, xmm4);
  mulsd(xmm0, xmm6);
  mulpd(xmm6, Address(tmp, 112));          // 0xfffffffeUL, 0x3fdfffffUL, 0xfffffffeUL, 0x3fdfffffUL
  addsd(xmm1, xmm2);
  unpckhpd(xmm2, xmm2);
  mulpd(xmm0, xmm5);
  addsd(xmm1, xmm0);
  por(xmm2, xmm7);
  unpckhpd(xmm0, xmm0);
  addsd(xmm0, xmm1);
  addsd(xmm0, xmm6);
  addl(edx, 894);
  cmpl(edx, 1916);
  jcc (Assembler::above, L_2TAG_PACKET_1_0_2);
  mulsd(xmm0, xmm2);
  addsd(xmm0, xmm2);
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_1_0_2);
  fnstcw(Address(rsp, 24));
  movzwl(edx, Address(rsp, 24));
  orl(edx, 768);
  movw(Address(rsp, 28), edx);
  fldcw(Address(rsp, 28));
  movl(edx, eax);
  sarl(eax, 1);
  subl(edx, eax);
  movdqu(xmm6, Address(tmp, 0));           // 0x00000000UL, 0xfff00000UL, 0x00000000UL, 0xfff00000UL
  pandn(xmm6, xmm2);
  addl(eax, 1023);
  movdl(xmm3, eax);
  psllq(xmm3, 52);
  por(xmm6, xmm3);
  addl(edx, 1023);
  movdl(xmm4, edx);
  psllq(xmm4, 52);
  movsd(Address(rsp, 8), xmm0);
  fld_d(Address(rsp, 8));
  movsd(Address(rsp, 16), xmm6);
  fld_d(Address(rsp, 16));
  fmula(1);
  faddp(1);
  movsd(Address(rsp, 8), xmm4);
  fld_d(Address(rsp, 8));
  fmulp(1);
  fstp_d(Address(rsp, 8));
  movsd(xmm0,Address(rsp, 8));
  fldcw(Address(rsp, 24));
  pextrw(ecx, xmm0, 3);
  andl(ecx, 32752);
  cmpl(ecx, 32752);
  jcc(Assembler::greaterEqual, L_2TAG_PACKET_3_0_2);
  cmpl(ecx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_4_0_2);
  jmp(L_2TAG_PACKET_2_0_2);
  cmpl(ecx, INT_MIN);
  jcc(Assembler::less, L_2TAG_PACKET_3_0_2);
  cmpl(ecx, -1064950997);
  jcc(Assembler::less, L_2TAG_PACKET_2_0_2);
  jcc(Assembler::greater, L_2TAG_PACKET_4_0_2);
  movl(edx, Address(rsp, 128));
  cmpl(edx ,-17155601);
  jcc(Assembler::less, L_2TAG_PACKET_2_0_2);
  jmp(L_2TAG_PACKET_4_0_2);

  bind(L_2TAG_PACKET_3_0_2);
  movl(edx, 14);
  jmp(L_2TAG_PACKET_5_0_2);

  bind(L_2TAG_PACKET_4_0_2);
  movl(edx, 15);

  bind(L_2TAG_PACKET_5_0_2);
  movsd(Address(rsp, 0), xmm0);
  movsd(xmm0, Address(rsp, 128));
  fld_d(Address(rsp, 0));
  jmp(L_2TAG_PACKET_6_0_2);

  bind(L_2TAG_PACKET_7_0_2);
  cmpl(eax, 2146435072);
  jcc(Assembler::greaterEqual, L_2TAG_PACKET_8_0_2);
  movl(eax, Address(rsp, 132));
  cmpl(eax, INT_MIN);
  jcc(Assembler::greaterEqual, L_2TAG_PACKET_9_0_2);
  movsd(xmm0, Address(tmp, 1208));         // 0xffffffffUL, 0x7fefffffUL
  mulsd(xmm0, xmm0);
  movl(edx, 14);
  jmp(L_2TAG_PACKET_5_0_2);

  bind(L_2TAG_PACKET_9_0_2);
  movsd(xmm0, Address(tmp, 1216));
  mulsd(xmm0, xmm0);
  movl(edx, 15);
  jmp(L_2TAG_PACKET_5_0_2);

  bind(L_2TAG_PACKET_8_0_2);
  movl(edx, Address(rsp, 128));
  cmpl(eax, 2146435072);
  jcc(Assembler::above, L_2TAG_PACKET_10_0_2);
  cmpl(edx, 0);
  jcc(Assembler::notEqual, L_2TAG_PACKET_10_0_2);
  movl(eax, Address(rsp, 132));
  cmpl(eax, 2146435072);
  jcc(Assembler::notEqual, L_2TAG_PACKET_11_0_2);
  movsd(xmm0, Address(tmp, 1192));         // 0x00000000UL, 0x7ff00000UL
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_11_0_2);
  movsd(xmm0, Address(tmp, 1200));         // 0x00000000UL, 0x00000000UL
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_10_0_2);
  movsd(xmm0, Address(rsp, 128));
  addsd(xmm0, xmm0);
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_0_0_2);
  movl(eax, Address(rsp, 132));
  andl(eax, 2147483647);
  cmpl(eax, 1083179008);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_7_0_2);
  movsd(xmm0, Address(rsp, 128));
  addsd(xmm0, Address(tmp, 1184));         // 0x00000000UL, 0x3ff00000UL
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_2_0_2);
  movsd(Address(rsp, 48), xmm0);
  fld_d(Address(rsp, 48));

  bind(L_2TAG_PACKET_6_0_2);
  movl(tmp, Address(rsp, 64));
}
void MacroAssembler::fast_exp(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ecx, Register edx, Register tmp) {
  Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2;
  Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2;
  Label L_2TAG_PACKET_8_0_2, L_2TAG_PACKET_9_0_2, L_2TAG_PACKET_10_0_2, L_2TAG_PACKET_11_0_2;
  Label L_2TAG_PACKET_12_0_2, B1_3, B1_5, start;

  assert_different_registers(tmp, eax, ecx, edx);
  jmp(start);
  address cv = (address)_cv;
  address Shifter = (address)_shifter;
  address mmask = (address)_mmask;
  address bias = (address)_bias;
  address Tbl_addr = (address)_Tbl_addr;
  address ALLONES = (address)_ALLONES;
  address ebias = (address)_ebias;
  address XMAX = (address)_XMAX;
  address XMIN = (address)_XMIN;
  address INF = (address)_INF;
  address ZERO = (address)_ZERO;
  address ONE_val = (address)_ONE_val;

  bind(start);
  subq(rsp, 24);
  movsd(Address(rsp, 8), xmm0);
  unpcklpd(xmm0, xmm0);
  movdqu(xmm1, ExternalAddress(cv));       // 0x652b82feUL, 0x40571547UL, 0x652b82feUL, 0x40571547UL
  movdqu(xmm6, ExternalAddress(Shifter));  // 0x00000000UL, 0x43380000UL, 0x00000000UL, 0x43380000UL
  movdqu(xmm2, ExternalAddress(16+cv));    // 0xfefa0000UL, 0x3f862e42UL, 0xfefa0000UL, 0x3f862e42UL
  movdqu(xmm3, ExternalAddress(32+cv));    // 0xbc9e3b3aUL, 0x3d1cf79aUL, 0xbc9e3b3aUL, 0x3d1cf79aUL
  pextrw(eax, xmm0, 3);
  andl(eax, 32767);
  movl(edx, 16527);
  subl(edx, eax);
  subl(eax, 15504);
  orl(edx, eax);
  cmpl(edx, INT_MIN);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2);
  mulpd(xmm1, xmm0);
  addpd(xmm1, xmm6);
  movapd(xmm7, xmm1);
  subpd(xmm1, xmm6);
  mulpd(xmm2, xmm1);
  movdqu(xmm4, ExternalAddress(64+cv));    // 0xe3289860UL, 0x3f56c15cUL, 0x555b9e25UL, 0x3fa55555UL
  mulpd(xmm3, xmm1);
  movdqu(xmm5, ExternalAddress(80+cv));    // 0xc090cf0fUL, 0x3f811115UL, 0x55548ba1UL, 0x3fc55555UL
  subpd(xmm0, xmm2);
  movdl(eax, xmm7);
  movl(ecx, eax);
  andl(ecx, 63);
  shll(ecx, 4);
  sarl(eax, 6);
  movl(edx, eax);
  movdqu(xmm6, ExternalAddress(mmask));    // 0xffffffc0UL, 0x00000000UL, 0xffffffc0UL, 0x00000000UL
  pand(xmm7, xmm6);
  movdqu(xmm6, ExternalAddress(bias));     // 0x0000ffc0UL, 0x00000000UL, 0x0000ffc0UL, 0x00000000UL
  paddq(xmm7, xmm6);
  psllq(xmm7, 46);
  subpd(xmm0, xmm3);
  lea(tmp, ExternalAddress(Tbl_addr));
  movdqu(xmm2, Address(ecx,tmp));
  mulpd(xmm4, xmm0);
  movapd(xmm6, xmm0);
  movapd(xmm1, xmm0);
  mulpd(xmm6, xmm6);
  mulpd(xmm0, xmm6);
  addpd(xmm5, xmm4);
  mulsd(xmm0, xmm6);
  mulpd(xmm6, ExternalAddress(48+cv));     // 0xfffffffeUL, 0x3fdfffffUL, 0xfffffffeUL, 0x3fdfffffUL
  addsd(xmm1, xmm2);
  unpckhpd(xmm2, xmm2);
  mulpd(xmm0, xmm5);
  addsd(xmm1, xmm0);
  por(xmm2, xmm7);
  unpckhpd(xmm0, xmm0);
  addsd(xmm0, xmm1);
  addsd(xmm0, xmm6);
  addl(edx, 894);
  cmpl(edx, 1916);
  jcc (Assembler::above, L_2TAG_PACKET_1_0_2);
  mulsd(xmm0, xmm2);
  addsd(xmm0, xmm2);
  jmp (B1_5);

  bind(L_2TAG_PACKET_1_0_2);
  xorpd(xmm3, xmm3);
  movdqu(xmm4, ExternalAddress(ALLONES));  // 0xffffffffUL, 0xffffffffUL, 0xffffffffUL, 0xffffffffUL
  movl(edx, -1022);
  subl(edx, eax);
  movdl(xmm5, edx);
  psllq(xmm4, xmm5);
  movl(ecx, eax);
  sarl(eax, 1);
  pinsrw(xmm3, eax, 3);
  movdqu(xmm6, ExternalAddress(ebias));    // 0x00000000UL, 0x3ff00000UL, 0x00000000UL, 0x3ff00000UL
  psllq(xmm3, 4);
  psubd(xmm2, xmm3);
  mulsd(xmm0, xmm2);
  cmpl(edx, 52);
  jcc(Assembler::greater, L_2TAG_PACKET_2_0_2);
  pand(xmm4, xmm2);
  paddd(xmm3, xmm6);
  subsd(xmm2, xmm4);
  addsd(xmm0, xmm2);
  cmpl(ecx, 1023);
  jcc(Assembler::greaterEqual, L_2TAG_PACKET_3_0_2);
  pextrw(ecx, xmm0, 3);
  andl(ecx, 32768);
  orl(edx, ecx);
  cmpl(edx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_4_0_2);
  movapd(xmm6, xmm0);
  addsd(xmm0, xmm4);
  mulsd(xmm0, xmm3);
  pextrw(ecx, xmm0, 3);
  andl(ecx, 32752);
  cmpl(ecx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_5_0_2);
  jmp(B1_5);

  bind(L_2TAG_PACKET_5_0_2);
  mulsd(xmm6, xmm3);
  mulsd(xmm4, xmm3);
  movdqu(xmm0, xmm6);
  pxor(xmm6, xmm4);
  psrad(xmm6, 31);
  pshufd(xmm6, xmm6, 85);
  psllq(xmm0, 1);
  psrlq(xmm0, 1);
  pxor(xmm0, xmm6);
  psrlq(xmm6, 63);
  paddq(xmm0, xmm6);
  paddq(xmm0, xmm4);
  movl(Address(rsp,0), 15);
  jmp(L_2TAG_PACKET_6_0_2);

  bind(L_2TAG_PACKET_4_0_2);
  addsd(xmm0, xmm4);
  mulsd(xmm0, xmm3);
  jmp(B1_5);

  bind(L_2TAG_PACKET_3_0_2);
  addsd(xmm0, xmm4);
  mulsd(xmm0, xmm3);
  pextrw(ecx, xmm0, 3);
  andl(ecx, 32752);
  cmpl(ecx, 32752);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_7_0_2);
  jmp(B1_5);

  bind(L_2TAG_PACKET_2_0_2);
  paddd(xmm3, xmm6);
  addpd(xmm0, xmm2);
  mulsd(xmm0, xmm3);
  movl(Address(rsp,0), 15);
  jmp(L_2TAG_PACKET_6_0_2);

  bind(L_2TAG_PACKET_8_0_2);
  cmpl(eax, 2146435072);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_9_0_2);
  movl(eax, Address(rsp,12));
  cmpl(eax, INT_MIN);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_10_0_2);
  movsd(xmm0, ExternalAddress(XMAX));      // 0xffffffffUL, 0x7fefffffUL
  mulsd(xmm0, xmm0);

  bind(L_2TAG_PACKET_7_0_2);
  movl(Address(rsp,0), 14);
  jmp(L_2TAG_PACKET_6_0_2);

  bind(L_2TAG_PACKET_10_0_2);
  movsd(xmm0, ExternalAddress(XMIN));      // 0x00000000UL, 0x00100000UL
  mulsd(xmm0, xmm0);
  movl(Address(rsp,0), 15);
  jmp(L_2TAG_PACKET_6_0_2);

  bind(L_2TAG_PACKET_9_0_2);
  movl(edx, Address(rsp,8));
  cmpl(eax, 2146435072);
  jcc(Assembler::above, L_2TAG_PACKET_11_0_2);
  cmpl(edx, 0);
  jcc(Assembler::notEqual, L_2TAG_PACKET_11_0_2);
  movl(eax, Address(rsp,12));
  cmpl(eax, 2146435072);
  jcc(Assembler::notEqual, L_2TAG_PACKET_12_0_2);
  movsd(xmm0, ExternalAddress(INF));       // 0x00000000UL, 0x7ff00000UL
  jmp(B1_5);

  bind(L_2TAG_PACKET_12_0_2);
  movsd(xmm0, ExternalAddress(ZERO));      // 0x00000000UL, 0x00000000UL
  jmp(B1_5);

  bind(L_2TAG_PACKET_11_0_2);
  movsd(xmm0, Address(rsp, 8));
  addsd(xmm0, xmm0);
  jmp(B1_5);

  bind(L_2TAG_PACKET_0_0_2);
  movl(eax, Address(rsp, 12));
  andl(eax, 2147483647);
  cmpl(eax, 1083179008);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_8_0_2);
  movsd(Address(rsp, 8), xmm0);
  addsd(xmm0, ExternalAddress(ONE_val));   // 0x00000000UL, 0x3ff00000UL
  jmp(B1_5);

  bind(L_2TAG_PACKET_6_0_2);
  movq(Address(rsp, 16), xmm0);

  bind(B1_3);
  movq(xmm0, Address(rsp, 16));

  bind(B1_5);
  addq(rsp, 24);
}
void MacroAssembler::fast_log(XMMRegister xmm0, XMMRegister xmm1, XMMRegister xmm2, XMMRegister xmm3, XMMRegister xmm4, XMMRegister xmm5, XMMRegister xmm6, XMMRegister xmm7, Register eax, Register ecx, Register edx, Register tmp) {
  Label L_2TAG_PACKET_0_0_2, L_2TAG_PACKET_1_0_2, L_2TAG_PACKET_2_0_2, L_2TAG_PACKET_3_0_2;
  Label L_2TAG_PACKET_4_0_2, L_2TAG_PACKET_5_0_2, L_2TAG_PACKET_6_0_2, L_2TAG_PACKET_7_0_2;
  Label L_2TAG_PACKET_8_0_2, L_2TAG_PACKET_9_0_2;
  Label L_2TAG_PACKET_10_0_2, start;

  assert_different_registers(tmp, eax, ecx, edx);
  jmp(start);
  address static_const_table = (address)_static_const_table_log;

  bind(start);
  subl(rsp, 104);
  movl(Address(rsp, 40), tmp);
  lea(tmp, ExternalAddress(static_const_table));
  xorpd(xmm2, xmm2);
  movl(eax, 16368);
  pinsrw(xmm2, eax, 3);
  xorpd(xmm3, xmm3);
  movl(edx, 30704);
  pinsrw(xmm3, edx, 3);
  movsd(xmm0, Address(rsp, 112));
  movapd(xmm1, xmm0);
  movl(ecx, 32768);
  movdl(xmm4, ecx);
  movsd(xmm5, Address(tmp, 2128));         // 0x00000000UL, 0xffffe000UL
  pextrw(eax, xmm0, 3);
  por(xmm0, xmm2);
  psllq(xmm0, 5);
  movl(ecx, 16352);
  psrlq(xmm0, 34);
  rcpss(xmm0, xmm0);
  psllq(xmm1, 12);
  pshufd(xmm6, xmm5, 228);
  psrlq(xmm1, 12);
  subl(eax, 16);
  cmpl(eax, 32736);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_0_0_2);

  bind(L_2TAG_PACKET_1_0_2);
  paddd(xmm0, xmm4);
  por(xmm1, xmm3);
  movdl(edx, xmm0);
  psllq(xmm0, 29);
  pand(xmm5, xmm1);
  pand(xmm0, xmm6);
  subsd(xmm1, xmm5);
  mulpd(xmm5, xmm0);
  andl(eax, 32752);
  subl(eax, ecx);
  cvtsi2sdl(xmm7, eax);
  mulsd(xmm1, xmm0);
  movsd(xmm6, Address(tmp, 2064));         // 0xfefa3800UL, 0x3fa62e42UL
  movdqu(xmm3, Address(tmp, 2080));        // 0x92492492UL, 0x3fc24924UL, 0x00000000UL, 0xbfd00000UL
  subsd(xmm5, xmm2);
  andl(edx, 16711680);
  shrl(edx, 12);
  movdqu(xmm0, Address(tmp, edx));
  movdqu(xmm4, Address(tmp, 2096));        // 0x3d6fb175UL, 0xbfc5555eUL, 0x55555555UL, 0x3fd55555UL
  addsd(xmm1, xmm5);
  movdqu(xmm2, Address(tmp, 2112));        // 0x9999999aUL, 0x3fc99999UL, 0x00000000UL, 0xbfe00000UL
  mulsd(xmm6, xmm7);
  pshufd(xmm5, xmm1, 68);
  mulsd(xmm7, Address(tmp, 2072));         // 0x93c76730UL, 0x3ceef357UL, 0x92492492UL, 0x3fc24924UL
  mulsd(xmm3, xmm1);
  addsd(xmm0, xmm6);
  mulpd(xmm4, xmm5);
  mulpd(xmm5, xmm5);
  pshufd(xmm6, xmm0, 228);
  addsd(xmm0, xmm1);
  addpd(xmm4, xmm2);
  mulpd(xmm3, xmm5);
  subsd(xmm6, xmm0);
  mulsd(xmm4, xmm1);
  pshufd(xmm2, xmm0, 238);
  addsd(xmm1, xmm6);
  mulsd(xmm5, xmm5);
  addsd(xmm7, xmm2);
  addpd(xmm4, xmm3);
  addsd(xmm1, xmm7);
  mulpd(xmm4, xmm5);
  addsd(xmm1, xmm4);
  pshufd(xmm5, xmm4, 238);
  addsd(xmm1, xmm5);
  addsd(xmm0, xmm1);
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_0_0_2);
  movsd(xmm0, Address(rsp, 112));
  movdqu(xmm1, xmm0);
  addl(eax, 16);
  cmpl(eax, 32768);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_3_0_2);
  cmpl(eax, 16);
  jcc(Assembler::below, L_2TAG_PACKET_4_0_2);

  bind(L_2TAG_PACKET_5_0_2);
  addsd(xmm0, xmm0);
  jmp(L_2TAG_PACKET_2_0_2);

  bind(L_2TAG_PACKET_6_0_2);
  jcc(Assembler::above, L_2TAG_PACKET_5_0_2);
  cmpl(edx, 0);
  jcc(Assembler::above, L_2TAG_PACKET_5_0_2);
  jmp(L_2TAG_PACKET_7_0_2);

  bind(L_2TAG_PACKET_3_0_2);
  movdl(edx, xmm1);
  psrlq(xmm1, 32);
  movdl(ecx, xmm1);
  addl(ecx, ecx);
  cmpl(ecx, -2097152);
  jcc(Assembler::aboveEqual, L_2TAG_PACKET_6_0_2);
  orl(edx, ecx);
  cmpl(edx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_8_0_2);

  bind(L_2TAG_PACKET_7_0_2);
  xorpd(xmm1, xmm1);
  xorpd(xmm0, xmm0);
  movl(eax, 32752);
  pinsrw(xmm1, eax, 3);
  movl(edx, 3);
  mulsd(xmm0, xmm1);

  bind(L_2TAG_PACKET_9_0_2);
  movsd(Address(rsp, 0), xmm0);
  movsd(xmm0, Address(rsp, 112));
  fld_d(Address(rsp, 0));
  jmp(L_2TAG_PACKET_10_0_2);

  bind(L_2TAG_PACKET_8_0_2);
  xorpd(xmm1, xmm1);
  xorpd(xmm0, xmm0);
  movl(eax, 49136);
  pinsrw(xmm0, eax, 3);
  divsd(xmm0, xmm1);
  movl(edx, 2);
  jmp(L_2TAG_PACKET_9_0_2);

  bind(L_2TAG_PACKET_4_0_2);
  movdl(edx, xmm1);
  psrlq(xmm1, 32);
  movdl(ecx, xmm1);
  orl(edx, ecx);
  cmpl(edx, 0);
  jcc(Assembler::equal, L_2TAG_PACKET_8_0_2);
  xorpd(xmm1, xmm1);
  movl(eax, 18416);
  pinsrw(xmm1, eax, 3);
  mulsd(xmm0, xmm1);
  movapd(xmm1, xmm0);
  pextrw(eax, xmm0, 3);
  por(xmm0, xmm2);
  psllq(xmm0, 5);
  movl(ecx, 18416);
  psrlq(xmm0, 34);
  rcpss(xmm0, xmm0);
  psllq(xmm1, 12);
  pshufd(xmm6, xmm5, 228);
  psrlq(xmm1, 12);
  jmp(L_2TAG_PACKET_1_0_2);

  bind(L_2TAG_PACKET_2_0_2);
  movsd(Address(rsp, 24), xmm0);
  fld_d(Address(rsp, 24));

  bind(L_2TAG_PACKET_10_0_2);
  movl(tmp, Address(rsp, 40));
}
// These stubs are used by the compiler only.
// Argument registers, which must be preserved:
//   rcx - receiver (always first argument)
//   rdx - second argument (if any)
// Other registers that might be usable:
//   rax - inline cache register (is interface for itable stub)
//   rbx - method (used when calling out to interpreter)
// Available now, but may become callee-save at some point:
//   rsi, rdi
// Note that rax and rdx are also used for return values.
//
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int i486_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), i486_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT

  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif /* PRODUCT */

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");

  // get receiver klass
  address npe_addr = __ pc();
  __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  const Register method = rbx;

  // load Method* and target address
  __ lookup_virtual_method(rax, vtable_index, method);

  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }

  // rax,: receiver klass
  // method (rbx): Method*
  // rcx: receiver
  address ame_addr = __ pc();
  __ jmp( Address(method, Method::from_compiled_offset()));

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
    const char *name;
    switch (type) {
    case T_BOOLEAN:
        name = "jni_fast_GetBooleanField";
        break;
    case T_BYTE:
        name = "jni_fast_GetByteField";
        break;
    case T_CHAR:
        name = "jni_fast_GetCharField";
        break;
    case T_SHORT:
        name = "jni_fast_GetShortField";
        break;
    case T_INT:
        name = "jni_fast_GetIntField";
        break;
    case T_LONG:
        name = "jni_fast_GetLongField";
        break;
    default:
        ShouldNotReachHere();
    }
    ResourceMark rm;
    BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
    address fast_entry = b->instructions_begin();
    CodeBuffer* cbuf = new CodeBuffer(fast_entry, b->instructions_size());
    MacroAssembler* masm = new MacroAssembler(cbuf);

    Label slow;

    address counter_addr = SafepointSynchronize::safepoint_counter_addr();
    Address ca(counter_addr, relocInfo::none);
    __ movl (rcounter, ca);
    __ movq (robj, rarg1);
    __ testb (rcounter, 1);
    __ jcc (Assembler::notZero, slow);
    if (os::is_MP()) {
        __ xorq (robj, rcounter);
        __ xorq (robj, rcounter);                   // obj, since
        // robj ^ rcounter ^ rcounter == robj
        // robj is data dependent on rcounter.
    }
    __ movq (robj, Address(robj));                // *obj
    __ movq (roffset, rarg2);
    __ shrq (roffset, 2);                         // offset

    assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
    speculative_load_pclist[count] = __ pc();
    switch (type) {
    case T_BOOLEAN:
        __ movzbl (rax, Address(robj, roffset, Address::times_1));
        break;
    case T_BYTE:
        __ movsbl (rax, Address(robj, roffset, Address::times_1));
        break;
    case T_CHAR:
        __ movzwl (rax, Address(robj, roffset, Address::times_1));
        break;
    case T_SHORT:
        __ movswl (rax, Address(robj, roffset, Address::times_1));
        break;
    case T_INT:
        __ movl   (rax, Address(robj, roffset, Address::times_1));
        break;
    case T_LONG:
        __ movq   (rax, Address(robj, roffset, Address::times_1));
        break;
    default:
        ShouldNotReachHere();
    }

    __ movq (rcounter_addr, (int64_t)counter_addr);
    ca = Address(rcounter_addr);
    if (os::is_MP()) {
        __ xorq (rcounter_addr, rax);
        __ xorq (rcounter_addr, rax);               // ca is data dependent on rax.
    }
    __ cmpl (rcounter, ca);
    __ jcc (Assembler::notEqual, slow);

    __ ret (0);

    slowcase_entry_pclist[count++] = __ pc();
    __ bind (slow);
    address slow_case_addr;
    switch (type) {
    case T_BOOLEAN:
        slow_case_addr = jni_GetBooleanField_addr();
        break;
    case T_BYTE:
        slow_case_addr = jni_GetByteField_addr();
        break;
    case T_CHAR:
        slow_case_addr = jni_GetCharField_addr();
        break;
    case T_SHORT:
        slow_case_addr = jni_GetShortField_addr();
        break;
    case T_INT:
        slow_case_addr = jni_GetIntField_addr();
        break;
    case T_LONG:
        slow_case_addr = jni_GetLongField_addr();
    }
    // tail call
    __ jmp (slow_case_addr, relocInfo::none);

    __ flush ();

    return fast_entry;
}
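// A hedged C++ sketch (stand-in names, not HotSpot's actual API) of the protocol
// the stub above emits: read the safepoint counter, take the slow path if a
// safepoint is in progress (odd counter), speculatively load the field, then
// re-check that the counter is unchanged; otherwise fall back to the slow accessor.
extern volatile int safepoint_counter;                // assumed global counter
int slow_GetIntField(void* obj, long field_offset);   // assumed slow-path accessor

static int fast_GetIntField_sketch(void* obj, long field_offset) {
  int before = safepoint_counter;
  if (before & 1) {
    return slow_GetIntField(obj, field_offset);       // safepoint in progress
  }
  int value = *reinterpret_cast<int*>(static_cast<char*>(obj) + field_offset);  // speculative load
  if (safepoint_counter != before) {
    return slow_GetIntField(obj, field_offset);       // object may have moved; retry via slow path
  }
  return value;
}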
  //------------------------------------------------------------------------------------------------------------------------
  // Continuation point for throwing of implicit exceptions that are not handled in
  // the current activation. Fabricates an exception oop and initiates normal
  // exception dispatching in this frame. Since we need to preserve callee-saved values
  // (currently only for C2, but done for C1 as well) we need a callee-saved oop map and
  // therefore have to make these stubs into RuntimeStubs rather than BufferBlobs.
  // If the compiler needs all registers to be preserved between the fault
  // point and the exception handler then it must assume responsibility for that in
  // AbstractCompiler::continuation_for_implicit_null_exception or
  // continuation_for_implicit_division_by_zero_exception. All other implicit
  // exceptions (e.g., NullPointerException or AbstractMethodError on entry) are
  // either at call sites or otherwise assume that stack unwinding will be initiated,
  // so caller saved registers were assumed volatile in the compiler.
  //
  // Note: the routine set_pc_not_at_call_for_caller in SharedRuntime.cpp requires
  // that this code be generated into a RuntimeStub.
  address StubGenerator::generate_throw_exception(const char* name, address runtime_entry, bool restore_saved_exception_pc) {

    int insts_size = 256;
    int locs_size  = 32;

    CodeBuffer* code     = new CodeBuffer(insts_size, locs_size, 0, 0, 0, false, NULL, NULL, NULL, false, NULL, name, false);
    OopMapSet* oop_maps  = new OopMapSet();
    MacroAssembler* masm = new MacroAssembler(code);

    address start = __ pc();

    // This is an inlined and slightly modified version of call_VM
    // which has the ability to fetch the return PC out of
    // thread-local storage and also sets up last_Java_sp slightly
    // differently than the real call_VM
    Register java_thread = ebx;
    __ get_thread(java_thread);
    if (restore_saved_exception_pc) {
      __ movl(eax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
      __ pushl(eax);
    }
      
#ifndef COMPILER2
    __ enter(); // required for proper stackwalking of RuntimeStub frame
#endif // COMPILER2

    __ subl(esp, framesize * wordSize); // prolog

#ifdef COMPILER2
    if( OptoRuntimeCalleeSavedFloats ) {
      if( UseSSE == 1 ) {
        __ movss(Address(esp,xmm6_off*wordSize),xmm6);
        __ movss(Address(esp,xmm7_off*wordSize),xmm7);
      } else if( UseSSE == 2 ) {
        __ movsd(Address(esp,xmm6_off*wordSize),xmm6);
        __ movsd(Address(esp,xmm7_off*wordSize),xmm7);
      }
    }
#endif /* COMPILER2 */
    __ movl(Address(esp, ebp_off * wordSize), ebp);
    __ movl(Address(esp, edi_off * wordSize), edi);
    __ movl(Address(esp, esi_off * wordSize), esi);

    // push java thread (becomes first argument of C function)
    __ movl(Address(esp, thread_off * wordSize), java_thread);

    // Set up last_Java_sp and last_Java_fp
    __ set_last_Java_frame(java_thread, esp, ebp, NULL);

    // Call runtime
    __ call(runtime_entry, relocInfo::runtime_call_type);
    // Generate oop map
    OopMap* map =  new OopMap(framesize, 0);        
#ifdef COMPILER2
    // SharedInfo is apparently not initialized if -Xint is specified
    if (UseCompiler) {
      map->set_callee_saved(SharedInfo::stack2reg(ebp_off), framesize, 0, OptoReg::Name(EBP_num));
      map->set_callee_saved(SharedInfo::stack2reg(edi_off), framesize, 0, OptoReg::Name(EDI_num));
      map->set_callee_saved(SharedInfo::stack2reg(esi_off), framesize, 0, OptoReg::Name(ESI_num));
      if( OptoRuntimeCalleeSavedFloats ) {
        map->set_callee_saved(SharedInfo::stack2reg(xmm6_off  ), framesize, 0, OptoReg::Name(XMM6a_num));
        map->set_callee_saved(SharedInfo::stack2reg(xmm6_off+1), framesize, 0, OptoReg::Name(XMM6b_num));
        map->set_callee_saved(SharedInfo::stack2reg(xmm7_off  ), framesize, 0, OptoReg::Name(XMM7a_num));
        map->set_callee_saved(SharedInfo::stack2reg(xmm7_off+1), framesize, 0, OptoReg::Name(XMM7b_num));
      }
    }
#endif
#ifdef COMPILER1
    map->set_callee_saved(OptoReg::Name(SharedInfo::stack0+ebp_off), framesize, 0, OptoReg::Name(ebp->encoding()));
    map->set_callee_saved(OptoReg::Name(SharedInfo::stack0+esi_off), framesize, 0, OptoReg::Name(esi->encoding()));
    map->set_callee_saved(OptoReg::Name(SharedInfo::stack0+edi_off), framesize, 0, OptoReg::Name(edi->encoding()));
#endif
    oop_maps->add_gc_map(__ pc() - start, true, map);
      
    // restore the thread (cannot use the pushed argument since arguments
    // may be overwritten by C code generated by an optimizing compiler);
    // however can use the register value directly if it is callee saved.
    __ get_thread(java_thread);

    __ reset_last_Java_frame(java_thread, false);

    // Restore callee save registers.  This must be done after resetting the Java frame
#ifdef COMPILER2
    if( OptoRuntimeCalleeSavedFloats ) {
      if( UseSSE == 1 ) {
        __ movss(xmm6,Address(esp,xmm6_off*wordSize));
        __ movss(xmm7,Address(esp,xmm7_off*wordSize));
      } else if( UseSSE == 2 ) {
        __ movsd(xmm6,Address(esp,xmm6_off*wordSize));
        __ movsd(xmm7,Address(esp,xmm7_off*wordSize));
      }
    }
#endif /* COMPILER2 */
    __ movl(ebp,Address(esp, ebp_off * wordSize));
    __ movl(edi,Address(esp, edi_off * wordSize));
    __ movl(esi,Address(esp, esi_off * wordSize));

    // discard arguments
    __ addl(esp, framesize * wordSize); // epilog

#ifndef COMPILER2
    __ leave(); // required for proper stackwalking of RuntimeStub frame
#endif // COMPILER2

    // check for pending exceptions
#ifdef ASSERT
    Label L;
    __ cmpl(Address(java_thread, Thread::pending_exception_offset()), (int)NULL);
    __ jcc(Assembler::notEqual, L);
    __ should_not_reach_here();
    __ bind(L);
#endif // ASSERT
    __ jmp(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type);

    // Note: it seems the frame size reported to the RuntimeStub has
    // to be incremented by 1 to account for the return PC. It
    // definitely must be one more than the amount by which SP was
    // decremented.
    int extra_words = 1;
#ifdef COMPILER1
    ++extra_words; // Not strictly necessary since C1 ignores frame size and uses link
#endif // COMPILER1

    RuntimeStub* stub = RuntimeStub::new_runtime_stub(name, code, framesize + extra_words, oop_maps, false);
    return stub->entry_point();
  }
void CompilerStubs::generate_compiler_new_obj_array() {
  comment_section("Compiler stub: new object array");
  comment("- ebx holds the prototypical near of the array class");

  entry("compiler_new_obj_array");
  comment("Get array length");
  // add BytesPerWord for return address
  movl(edx, Address(esp, Constant(BytesPerWord + JavaFrame::arg_offset_from_sp(0))));

  Label slow_case;
  comment("Check if the array length is too large or negative");
  cmpl(edx, Constant(maximum_safe_array_length));
  jcc(above, Constant(slow_case));

  comment("Get _inline_allocation_top");
  movl(eax, Address(Constant("_inline_allocation_top")));

  if (GenerateDebugAssembly) {
    comment("Check ExcessiveGC");
    cmpl(Address(Constant("ExcessiveGC")), Constant(0)); // take the slow case whenever ExcessiveGC is set
    jcc(not_zero, Constant(slow_case));
  }

  comment("Compute new top");
  leal(ecx, Address(eax, edx, times_4, Constant(Array::base_offset())));

  comment("Check for overflow");
  cmpl(ecx, eax);
  jcc(below, Constant(slow_case));

  comment("Compare against _inline_allocation_end");
  cmpl(ecx, Address(Constant("_inline_allocation_end")));
  jcc(above, Constant(slow_case));

  comment("Allocation succeeded, set _inline_allocation_top");
  movl(Address(Constant("_inline_allocation_top")), ecx);

  comment("Set prototypical near in object; no need for write barrier");
  movl(Address(eax), ebx);

  comment("Set the length");
  movl(Address(eax, Constant(Array::length_offset())), edx);

  comment("Compute remaining size");
  testl(edx, edx);

  comment("Empty array?");
  Label init_done;
  jcc(equal, Constant(init_done));

  comment("Zero array elements");
  xorl(ecx, ecx);
  Label init_loop;
  bind(init_loop);
  movl(Address(eax, edx, times_4, Constant(Array::base_offset() - oopSize)), ecx);
  decrement(edx, 1);
  jcc(not_zero, Constant(init_loop));
  bind(init_done);

  comment("The newly allocated array is in register eax");
  ret();

  comment("Slow case - call the VM runtime system");
  bind(slow_case);
  leal(eax, Address(Constant("anewarray")));
  goto_shared_call_vm(T_ARRAY);

  entry_end(); // compiler_new_obj_array
}
void MethodHandles::generate_method_handle_dispatch(MacroAssembler* _masm,
                                                    vmIntrinsics::ID iid,
                                                    Register receiver_reg,
                                                    Register member_reg,
                                                    bool for_compiler_entry) {
  assert(is_signature_polymorphic(iid), "expected invoke iid");
  Register rbx_method = rbx;   // eventual target of this invocation
  // temps used in this code are not used in *either* compiled or interpreted calling sequences
#ifdef _LP64
  Register temp1 = rscratch1;
  Register temp2 = rscratch2;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : j_rarg0), "only valid assignment");
    assert_different_registers(temp1,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp2,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
    assert_different_registers(temp3,        j_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4, j_rarg5);
  }
#else
  Register temp1 = (for_compiler_entry ? rsi : rdx);
  Register temp2 = rdi;
  Register temp3 = rax;
  if (for_compiler_entry) {
    assert(receiver_reg == (iid == vmIntrinsics::_linkToStatic ? noreg : rcx), "only valid assignment");
    assert_different_registers(temp1,        rcx, rdx);
    assert_different_registers(temp2,        rcx, rdx);
    assert_different_registers(temp3,        rcx, rdx);
  }
#endif
  else {
    assert_different_registers(temp1, temp2, temp3, saved_last_sp_register());  // don't trash lastSP
  }
  assert_different_registers(temp1, temp2, temp3, receiver_reg);
  assert_different_registers(temp1, temp2, temp3, member_reg);

  if (iid == vmIntrinsics::_invokeBasic) {
    // indirect through MH.form.vmentry.vmtarget
    jump_to_lambda_form(_masm, receiver_reg, rbx_method, temp1, for_compiler_entry);

  } else {
    // The method is a member invoker used by direct method handles.
    if (VerifyMethodHandles) {
      // make sure the trailing argument really is a MemberName (caller responsibility)
      verify_klass(_masm, member_reg, SystemDictionary::WK_KLASS_ENUM_NAME(java_lang_invoke_MemberName),
                   "MemberName required for invokeVirtual etc.");
    }

    Address member_clazz(    member_reg, NONZERO(java_lang_invoke_MemberName::clazz_offset_in_bytes()));
    Address member_vmindex(  member_reg, NONZERO(java_lang_invoke_MemberName::vmindex_offset_in_bytes()));
    Address member_vmtarget( member_reg, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()));

    Register temp1_recv_klass = temp1;
    if (iid != vmIntrinsics::_linkToStatic) {
      __ verify_oop(receiver_reg);
      if (iid == vmIntrinsics::_linkToSpecial) {
        // Don't actually load the klass; just null-check the receiver.
        __ null_check(receiver_reg);
      } else {
        // load receiver klass itself
        __ null_check(receiver_reg, oopDesc::klass_offset_in_bytes());
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      BLOCK_COMMENT("check_receiver {");
      // The receiver for the MemberName must be in receiver_reg.
      // Check the receiver against the MemberName.clazz
      if (VerifyMethodHandles && iid == vmIntrinsics::_linkToSpecial) {
        // Did not load it above...
        __ load_klass(temp1_recv_klass, receiver_reg);
        __ verify_klass_ptr(temp1_recv_klass);
      }
      if (VerifyMethodHandles && iid != vmIntrinsics::_linkToInterface) {
        Label L_ok;
        Register temp2_defc = temp2;
        __ load_heap_oop(temp2_defc, member_clazz);
        load_klass_from_Class(_masm, temp2_defc);
        __ verify_klass_ptr(temp2_defc);
        __ check_klass_subtype(temp1_recv_klass, temp2_defc, temp3, L_ok);
        // If we get here, the type check failed!
        __ STOP("receiver class disagrees with MemberName.clazz");
        __ bind(L_ok);
      }
      BLOCK_COMMENT("} check_receiver");
    }
    if (iid == vmIntrinsics::_linkToSpecial ||
        iid == vmIntrinsics::_linkToStatic) {
      DEBUG_ONLY(temp1_recv_klass = noreg);  // these guys didn't load the recv_klass
    }

    // Live registers at this point:
    //  member_reg - MemberName that was the trailing argument
    //  temp1_recv_klass - klass of stacked receiver, if needed
    //  rsi/r13 - interpreter linkage (if interpreted)
    //  rcx, rdx, rsi, rdi, r8, r9 - compiler arguments (if compiled)

    Label L_incompatible_class_change_error;
    switch (iid) {
    case vmIntrinsics::_linkToSpecial:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeSpecial, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToStatic:
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeStatic, member_reg, temp3);
      }
      __ movptr(rbx_method, member_vmtarget);
      break;

    case vmIntrinsics::_linkToVirtual:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeVirtual, member_reg, temp3);
      }

      // pick out the vtable index from the MemberName, and then we can discard it:
      Register temp2_index = temp2;
      __ movptr(temp2_index, member_vmindex);

      if (VerifyMethodHandles) {
        Label L_index_ok;
        __ cmpl(temp2_index, 0);
        __ jcc(Assembler::greaterEqual, L_index_ok);
        __ STOP("no virtual index");
        __ BIND(L_index_ok);
      }

      // Note:  The verifier invariants allow us to ignore MemberName.clazz and vmtarget
      // at this point.  And VerifyMethodHandles has already checked clazz, if needed.

      // get target Method* & entry point
      __ lookup_virtual_method(temp1_recv_klass, temp2_index, rbx_method);
      break;
    }

    case vmIntrinsics::_linkToInterface:
    {
      // same as TemplateTable::invokeinterface
      // (minus the CP setup and profiling, with different argument motion)
      if (VerifyMethodHandles) {
        verify_ref_kind(_masm, JVM_REF_invokeInterface, member_reg, temp3);
      }

      Register temp3_intf = temp3;
      __ load_heap_oop(temp3_intf, member_clazz);
      load_klass_from_Class(_masm, temp3_intf);
      __ verify_klass_ptr(temp3_intf);

      Register rbx_index = rbx_method;
      __ movptr(rbx_index, member_vmindex);
      if (VerifyMethodHandles) {
        Label L;
        __ cmpl(rbx_index, 0);
        __ jcc(Assembler::greaterEqual, L);
        __ STOP("invalid vtable index for MH.invokeInterface");
        __ bind(L);
      }

      // given intf, index, and recv klass, dispatch to the implementation method
      __ lookup_interface_method(temp1_recv_klass, temp3_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 temp2,
                                 L_incompatible_class_change_error);
      break;
    }

    default:
      fatal(err_msg_res("unexpected intrinsic %d: %s", iid, vmIntrinsics::name_at(iid)));
      break;
    }

    // Live at this point:
    //   rbx_method
    //   rsi/r13 (if interpreted)

    // After figuring out which concrete method to call, jump into it.
    // Note that this works in the interpreter with no data motion.
    // But the compiled version will require that rcx_recv be shifted out.
    __ verify_method_ptr(rbx_method);
    jump_from_method_handle(_masm, rbx_method, temp1, for_compiler_entry);

    if (iid == vmIntrinsics::_linkToInterface) {
      __ bind(L_incompatible_class_change_error);
      __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
    }
  }
}
//------------------------------------------------------------------------------
// MethodHandles::generate_method_handle_stub
//
// Generate an "entry" field for a method handle.
// This determines how the method handle will respond to calls.
void MethodHandles::generate_method_handle_stub(MacroAssembler* _masm, MethodHandles::EntryKind ek) {
  // Here is the register state during an interpreted call,
  // as set up by generate_method_handle_interpreter_entry():
  // - rbx: garbage temp (was MethodHandle.invoke methodOop, unused)
  // - rcx: receiver method handle
  // - rax: method handle type (only used by the check_mtype entry point)
  // - rsi/r13: sender SP (must preserve; see prepare_to_jump_from_interpreted)
  // - rdx: garbage temp, can blow away

  const Register rcx_recv    = rcx;
  const Register rax_argslot = rax;
  const Register rbx_temp    = rbx;
  const Register rdx_temp    = rdx;

  // This guy is set up by prepare_to_jump_from_interpreted (from interpreted calls)
  // and gen_c2i_adapter (from compiled calls):
  const Register saved_last_sp = LP64_ONLY(r13) NOT_LP64(rsi);

  // Argument registers for _raise_exception.
  // 32-bit: Pass first two oop/int args in registers ECX and EDX.
  const Register rarg0_code     = LP64_ONLY(j_rarg0) NOT_LP64(rcx);
  const Register rarg1_actual   = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
  const Register rarg2_required = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
  assert_different_registers(rarg0_code, rarg1_actual, rarg2_required, saved_last_sp);

  guarantee(java_lang_invoke_MethodHandle::vmentry_offset_in_bytes() != 0, "must have offsets");

  // some handy addresses
  Address rbx_method_fie(     rbx,      methodOopDesc::from_interpreted_offset() );
  Address rbx_method_fce(     rbx,      methodOopDesc::from_compiled_offset() );

  Address rcx_mh_vmtarget(    rcx_recv, java_lang_invoke_MethodHandle::vmtarget_offset_in_bytes() );
  Address rcx_dmh_vmindex(    rcx_recv, java_lang_invoke_DirectMethodHandle::vmindex_offset_in_bytes() );

  Address rcx_bmh_vmargslot(  rcx_recv, java_lang_invoke_BoundMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_bmh_argument(   rcx_recv, java_lang_invoke_BoundMethodHandle::argument_offset_in_bytes() );

  Address rcx_amh_vmargslot(  rcx_recv, java_lang_invoke_AdapterMethodHandle::vmargslot_offset_in_bytes() );
  Address rcx_amh_argument(   rcx_recv, java_lang_invoke_AdapterMethodHandle::argument_offset_in_bytes() );
  Address rcx_amh_conversion( rcx_recv, java_lang_invoke_AdapterMethodHandle::conversion_offset_in_bytes() );
  Address vmarg;                // __ argument_address(vmargslot)

  const int java_mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();

  if (have_entry(ek)) {
    __ nop();                   // empty stubs make SG sick
    return;
  }

  address interp_entry = __ pc();

  trace_method_handle(_masm, entry_name(ek));

  BLOCK_COMMENT(entry_name(ek));

  switch ((int) ek) {
  case _raise_exception:
    {
      // Not a real MH entry, but rather shared code for raising an
      // exception.  Since we use the compiled entry, arguments are
      // expected in compiler argument registers.
      assert(raise_exception_method(), "must be set");
      assert(raise_exception_method()->from_compiled_entry(), "method must be linked");

      const Register rdi_pc = rax;
      __ pop(rdi_pc);  // caller PC
      __ mov(rsp, saved_last_sp);  // cut the stack back to where the caller started

      Register rbx_method = rbx_temp;
      Label L_no_method;
      // FIXME: fill in _raise_exception_method with a suitable java.lang.invoke method
      __ movptr(rbx_method, ExternalAddress((address) &_raise_exception_method));
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, L_no_method);

      const int jobject_oop_offset = 0;
      __ movptr(rbx_method, Address(rbx_method, jobject_oop_offset));  // dereference the jobject
      __ testptr(rbx_method, rbx_method);
      __ jccb(Assembler::zero, L_no_method);
      __ verify_oop(rbx_method);

      NOT_LP64(__ push(rarg2_required));
      __ push(rdi_pc);         // restore caller PC
      __ jmp(rbx_method_fce);  // jump to compiled entry

      // Do something that at least causes a valid throw from the interpreter.
      __ bind(L_no_method);
      __ push(rarg2_required);
      __ push(rarg1_actual);
      __ jump(ExternalAddress(Interpreter::throw_WrongMethodType_entry()));
    }
    break;

  case _invokestatic_mh:
  case _invokespecial_mh:
    {
      Register rbx_method = rbx_temp;
      __ load_heap_oop(rbx_method, rcx_mh_vmtarget); // target is a methodOop
      __ verify_oop(rbx_method);
      // same as TemplateTable::invokestatic or invokespecial,
      // minus the CP setup and profiling:
      if (ek == _invokespecial_mh) {
        // Must load & check the first argument before entering the target method.
        __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
        __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
        __ null_check(rcx_recv);
        __ verify_oop(rcx_recv);
      }
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokevirtual_mh:
    {
      // same as TemplateTable::invokevirtual,
      // minus the CP setup and profiling:

      // pick out the vtable index and receiver offset from the MH,
      // and then we can discard it:
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rbx_index = rbx_temp;
      __ movl(rbx_index, rcx_dmh_vmindex);
      // Note:  The verifier allows us to ignore rcx_mh_vmtarget.
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      // get target methodOop & entry point
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      assert(vtableEntry::size() * wordSize == wordSize, "adjust the scaling in the code below");
      Address vtable_entry_addr(rax_klass,
                                rbx_index, Address::times_ptr,
                                base + vtableEntry::method_offset_in_bytes());
      Register rbx_method = rbx_temp;
      __ movptr(rbx_method, vtable_entry_addr);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
    }
    break;

  case _invokeinterface_mh:
    {
      // same as TemplateTable::invokeinterface,
      // minus the CP setup and profiling:

      // pick out the interface and itable index from the MH.
      __ load_method_handle_vmslots(rax_argslot, rcx_recv, rdx_temp);
      Register rdx_intf  = rdx_temp;
      Register rbx_index = rbx_temp;
      __ load_heap_oop(rdx_intf, rcx_mh_vmtarget);
      __ movl(rbx_index, rcx_dmh_vmindex);
      __ movptr(rcx_recv, __ argument_address(rax_argslot, -1));
      __ null_check(rcx_recv, oopDesc::klass_offset_in_bytes());

      // get receiver klass
      Register rax_klass = rax_argslot;
      __ load_klass(rax_klass, rcx_recv);
      __ verify_oop(rax_klass);

      Register rdi_temp   = rdi;
      Register rbx_method = rbx_index;

      // get interface klass
      Label no_such_interface;
      __ verify_oop(rdx_intf);
      __ lookup_interface_method(rax_klass, rdx_intf,
                                 // note: next two args must be the same:
                                 rbx_index, rbx_method,
                                 rdi_temp,
                                 no_such_interface);

      __ verify_oop(rbx_method);
      __ jmp(rbx_method_fie);
      __ hlt();

      __ bind(no_such_interface);
      // Throw an exception.
      // For historical reasons, it will be IncompatibleClassChangeError.
      __ mov(rbx_temp, rcx_recv);  // rarg2_required might be RCX
      assert_different_registers(rarg2_required, rbx_temp);
      __ movptr(rarg2_required, Address(rdx_intf, java_mirror_offset));  // required interface
      __ mov(   rarg1_actual,   rbx_temp);                               // bad receiver
      __ movl(  rarg0_code,     (int) Bytecodes::_invokeinterface);      // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));
    }
    break;

  case _bound_ref_mh:
  case _bound_int_mh:
  case _bound_long_mh:
  case _bound_ref_direct_mh:
  case _bound_int_direct_mh:
  case _bound_long_direct_mh:
    {
      bool direct_to_method = (ek >= _bound_ref_direct_mh);
      BasicType arg_type  = T_ILLEGAL;
      int       arg_mask  = _INSERT_NO_MASK;
      int       arg_slots = -1;
      get_ek_bound_mh_info(ek, arg_type, arg_mask, arg_slots);

      // make room for the new argument:
      __ movl(rax_argslot, rcx_bmh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      insert_arg_slots(_masm, arg_slots * stack_move_unit(), arg_mask, rax_argslot, rbx_temp, rdx_temp);

      // store bound argument into the new stack slot:
      __ load_heap_oop(rbx_temp, rcx_bmh_argument);
      if (arg_type == T_OBJECT) {
        __ movptr(Address(rax_argslot, 0), rbx_temp);
      } else {
        Address prim_value_addr(rbx_temp, java_lang_boxing_object::value_offset_in_bytes(arg_type));
        const int arg_size = type2aelembytes(arg_type);
        __ load_sized_value(rdx_temp, prim_value_addr, arg_size, is_signed_subword_type(arg_type), rbx_temp);
        __ store_sized_value(Address(rax_argslot, 0), rdx_temp, arg_size, rbx_temp);
      }

      if (direct_to_method) {
        Register rbx_method = rbx_temp;
        __ load_heap_oop(rbx_method, rcx_mh_vmtarget);
        __ verify_oop(rbx_method);
        __ jmp(rbx_method_fie);
      } else {
        __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
        __ verify_oop(rcx_recv);
        __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
      }
    }
    break;

  case _adapter_retype_only:
  case _adapter_retype_raw:
    // immediately jump to the next MH layer:
    __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
    __ verify_oop(rcx_recv);
    __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    // This is OK when all parameter types widen.
    // It is also OK when a return type narrows.
    break;

  case _adapter_check_cast:
    {
      // temps:
      Register rbx_klass = rbx_temp; // interesting AMH data

      // check a reference argument before jumping to the next layer of MH:
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      // What class are we casting to?
      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label done;
      __ movptr(rdx_temp, vmarg);
      __ testptr(rdx_temp, rdx_temp);
      __ jcc(Assembler::zero, done);         // no cast if null
      __ load_klass(rdx_temp, rdx_temp);

      // live at this point:
      // - rbx_klass:  klass required by the target method
      // - rdx_temp:   argument klass to test
      // - rcx_recv:   adapter method handle
      __ check_klass_subtype(rdx_temp, rbx_klass, rax_argslot, done);

      // If we get here, the type check failed!
      // Call the wrong_method_type stub, passing the failing argument type in rax.
      Register rax_mtype = rax_argslot;
      __ movl(rax_argslot, rcx_amh_vmargslot);  // reload argslot field
      __ movptr(rdx_temp, vmarg);

      assert_different_registers(rarg2_required, rdx_temp);
      __ load_heap_oop(rarg2_required, rcx_amh_argument);             // required class
      __ mov(          rarg1_actual,   rdx_temp);                     // bad object
      __ movl(         rarg0_code,     (int) Bytecodes::_checkcast);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(done);
      // get the new MH:
      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_prim:
  case _adapter_ref_to_prim:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_i2i:        // optimized subcase of adapt_prim_to_prim
//case _adapter_opt_f2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_l2i:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxi:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place conversion to int or an int subword
      __ movl(rax_argslot, rcx_amh_vmargslot);
      vmarg = __ argument_address(rax_argslot);

      switch (ek) {
      case _adapter_opt_i2i:
        __ movl(rdx_temp, vmarg);
        break;
      case _adapter_opt_l2i:
        {
          // just delete the extra slot; on a little-endian machine we keep the first
          __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
          remove_arg_slots(_masm, -stack_move_unit(),
                           rax_argslot, rbx_temp, rdx_temp);
          vmarg = Address(rax_argslot, -Interpreter::stackElementSize);
          __ movl(rdx_temp, vmarg);
        }
        break;
      case _adapter_opt_unboxi:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_INT);
#ifdef ASSERT
          for (int bt = T_BOOLEAN; bt < T_INT; bt++) {
            if (is_subword_type(BasicType(bt)))
              assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(BasicType(bt)), "");
          }
#endif
          __ null_check(rdx_temp, value_offset);
          __ movl(rdx_temp, Address(rdx_temp, value_offset));
          // We load this as a word.  Because we are little-endian,
          // the low bits will be correct, but the high bits may need cleaning.
          // The vminfo will guide us to clean those bits.
        }
        break;
      default:
        ShouldNotReachHere();
      }

      // Do the requested conversion and store the value.
      Register rbx_vminfo = rbx_temp;
      __ movl(rbx_vminfo, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");

      // get the new MH:
      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      // (now we are done with the old MH)

      // original 32-bit vmdata word must be of this form:
      //    | MBZ:6 | signBitCount:8 | srcDstTypes:8 | conversionOp:8 |
      __ xchgptr(rcx, rbx_vminfo);                // free rcx for shifts
      __ shll(rdx_temp /*, rcx*/);
      Label zero_extend, done;
      __ testl(rcx, CONV_VMINFO_SIGN_FLAG);
      __ jccb(Assembler::zero, zero_extend);

      // this path is taken for int->byte, int->short
      __ sarl(rdx_temp /*, rcx*/);
      __ jmpb(done);

      __ bind(zero_extend);
      // this is taken for int->char
      __ shrl(rdx_temp /*, rcx*/);

      __ bind(done);
      __ movl(vmarg, rdx_temp);  // Store the value.
      __ xchgptr(rcx, rbx_vminfo);                // restore rcx_recv

      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_i2l:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_unboxl:     // optimized subcase of adapt_ref_to_prim
    {
      // perform an in-place int-to-long or ref-to-long conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);

      // on a little-endian machine we keep the first slot and add another after
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                       rax_argslot, rbx_temp, rdx_temp);
      Address vmarg1(rax_argslot, -Interpreter::stackElementSize);
      Address vmarg2 = vmarg1.plus_disp(Interpreter::stackElementSize);

      switch (ek) {
      case _adapter_opt_i2l:
        {
#ifdef _LP64
          __ movslq(rdx_temp, vmarg1);  // Load sign-extended
          __ movq(vmarg1, rdx_temp);    // Store into first slot
#else
          __ movl(rdx_temp, vmarg1);
          __ sarl(rdx_temp, BitsPerInt - 1);  // __ extend_sign()
          __ movl(vmarg2, rdx_temp); // store second word
#endif
        }
        break;
      case _adapter_opt_unboxl:
        {
          // Load the value up from the heap.
          __ movptr(rdx_temp, vmarg1);
          int value_offset = java_lang_boxing_object::value_offset_in_bytes(T_LONG);
          assert(value_offset == java_lang_boxing_object::value_offset_in_bytes(T_DOUBLE), "");
          __ null_check(rdx_temp, value_offset);
#ifdef _LP64
          __ movq(rbx_temp, Address(rdx_temp, value_offset));
          __ movq(vmarg1, rbx_temp);
#else
          __ movl(rbx_temp, Address(rdx_temp, value_offset + 0*BytesPerInt));
          __ movl(rdx_temp, Address(rdx_temp, value_offset + 1*BytesPerInt));
          __ movl(vmarg1, rbx_temp);
          __ movl(vmarg2, rdx_temp);
#endif
        }
        break;
      default:
        ShouldNotReachHere();
      }

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_opt_f2d:        // optimized subcase of adapt_prim_to_prim
  case _adapter_opt_d2f:        // optimized subcase of adapt_prim_to_prim
    {
      // perform an in-place floating primitive conversion
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot, 1));
      if (ek == _adapter_opt_f2d) {
        insert_arg_slots(_masm, stack_move_unit(), _INSERT_INT_MASK,
                         rax_argslot, rbx_temp, rdx_temp);
      }
      Address vmarg(rax_argslot, -Interpreter::stackElementSize);

#ifdef _LP64
      if (ek == _adapter_opt_f2d) {
        __ movflt(xmm0, vmarg);
        __ cvtss2sd(xmm0, xmm0);
        __ movdbl(vmarg, xmm0);
      } else {
        __ movdbl(xmm0, vmarg);
        __ cvtsd2ss(xmm0, xmm0);
        __ movflt(vmarg, xmm0);
      }
#else //_LP64
      if (ek == _adapter_opt_f2d) {
        __ fld_s(vmarg);        // load float to ST0
        __ fstp_s(vmarg);       // store single
      } else {
        __ fld_d(vmarg);        // load double to ST0
        __ fstp_s(vmarg);       // store single
      }
#endif //_LP64

      if (ek == _adapter_opt_d2f) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_prim_to_ref:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_swap_args:
  case _adapter_rot_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_swap_1:
  case _adapter_opt_swap_2:
  case _adapter_opt_rot_1_up:
  case _adapter_opt_rot_1_down:
  case _adapter_opt_rot_2_up:
  case _adapter_opt_rot_2_down:
    {
      int swap_bytes = 0, rotate = 0;
      get_ek_adapter_opt_swap_rot_info(ek, swap_bytes, rotate);

      // 'argslot' is the position of the first argument to swap
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'vminfo' is the second
      Register rbx_destslot = rbx_temp;
      __ movl(rbx_destslot, rcx_amh_conversion);
      assert(CONV_VMINFO_SHIFT == 0, "preshifted");
      __ andl(rbx_destslot, CONV_VMINFO_MASK);
      __ lea(rbx_destslot, __ argument_address(rbx_destslot));
      DEBUG_ONLY(verify_argslot(_masm, rbx_destslot, "swap point must fall within current frame"));

      if (!rotate) {
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ movptr(rdx_temp, Address(rax_argslot , i));
          __ push(rdx_temp);
          __ movptr(rdx_temp, Address(rbx_destslot, i));
          __ movptr(Address(rax_argslot, i), rdx_temp);
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      } else {
        // push the first chunk, which is going to get overwritten
        for (int i = swap_bytes; (i -= wordSize) >= 0; ) {
          __ movptr(rdx_temp, Address(rax_argslot, i));
          __ push(rdx_temp);
        }

        if (rotate > 0) {
          // rotate upward
          __ subptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot > destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::aboveEqual, L_ok);
            __ stop("source must be above destination (upward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot down to destslot, copying contiguous data upwards
          // pseudo-code:
          //   rax = src_addr - swap_bytes
          //   rbx = dest_addr
          //   while (rax >= rbx) *(rax + swap_bytes) = *(rax + 0), rax--;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, swap_bytes), rdx_temp);
          __ addptr(rax_argslot, -wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::aboveEqual, loop);
        } else {
          __ addptr(rax_argslot, swap_bytes);
#ifdef ASSERT
          {
            // Verify that argslot < destslot, by at least swap_bytes.
            Label L_ok;
            __ cmpptr(rax_argslot, rbx_destslot);
            __ jccb(Assembler::belowEqual, L_ok);
            __ stop("source must be below destination (downward rotation)");
            __ bind(L_ok);
          }
#endif
          // work argslot up to destslot, copying contiguous data downwards
          // pseudo-code:
          //   rax = src_addr + swap_bytes
          //   rbx = dest_addr
          //   while (rax <= rbx) *(rax - swap_bytes) = *(rax + 0), rax++;
          Label loop;
          __ bind(loop);
          __ movptr(rdx_temp, Address(rax_argslot, 0));
          __ movptr(Address(rax_argslot, -swap_bytes), rdx_temp);
          __ addptr(rax_argslot, wordSize);
          __ cmpptr(rax_argslot, rbx_destslot);
          __ jccb(Assembler::belowEqual, loop);
        }

        // pop the original first chunk into the destination slot, now free
        for (int i = 0; i < swap_bytes; i += wordSize) {
          __ pop(rdx_temp);
          __ movptr(Address(rbx_destslot, i), rdx_temp);
        }
      }

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_dup_args:
    {
      // 'argslot' is the position of the first argument to duplicate
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // 'stack_move' is negative number of words to duplicate
      Register rdx_stack_move = rdx_temp;
      __ movl2ptr(rdx_stack_move, rcx_amh_conversion);
      __ sarptr(rdx_stack_move, CONV_STACK_MOVE_SHIFT);

      int argslot0_num = 0;
      Address argslot0 = __ argument_address(RegisterOrConstant(argslot0_num));
      assert(argslot0.base() == rsp, "");
      int pre_arg_size = argslot0.disp();
      assert(pre_arg_size % wordSize == 0, "");
      assert(pre_arg_size > 0, "must include PC");

      // remember the old rsp+1 (argslot[0])
      Register rbx_oldarg = rbx_temp;
      __ lea(rbx_oldarg, argslot0);

      // move rsp down to make room for dups
      __ lea(rsp, Address(rsp, rdx_stack_move, Address::times_ptr));

      // compute the new rsp+1 (argslot[0])
      Register rdx_newarg = rdx_temp;
      __ lea(rdx_newarg, argslot0);

      __ push(rdi);             // need a temp
      // (preceding push must be done after arg addresses are taken!)

      // pull down the pre_arg_size data (PC)
      for (int i = -pre_arg_size; i < 0; i += wordSize) {
        __ movptr(rdi, Address(rbx_oldarg, i));
        __ movptr(Address(rdx_newarg, i), rdi);
      }

      // copy from rax_argslot[0...] down to new_rsp[1...]
      // pseudo-code:
      //   rbx = old_rsp+1
      //   rdx = new_rsp+1
      //   rax = argslot
      //   while (rdx < rbx) *rdx++ = *rax++
      Label loop;
      __ bind(loop);
      __ movptr(rdi, Address(rax_argslot, 0));
      __ movptr(Address(rdx_newarg, 0), rdi);
      __ addptr(rax_argslot, wordSize);
      __ addptr(rdx_newarg, wordSize);
      __ cmpptr(rdx_newarg, rbx_oldarg);
      __ jccb(Assembler::less, loop);

      __ pop(rdi);              // restore temp

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_drop_args:
    {
      // 'argslot' is the position of the first argument to nuke
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      __ push(rdi);             // need a temp
      // (must do previous push after argslot address is taken)

      // 'stack_move' is number of words to drop
      Register rdi_stack_move = rdi;
      __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
      __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
      remove_arg_slots(_masm, rdi_stack_move,
                       rax_argslot, rbx_temp, rdx_temp);

      __ pop(rdi);              // restore temp

      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);
    }
    break;

  case _adapter_collect_args:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  case _adapter_spread_args:
    // handled completely by optimized cases
    __ stop("init_AdapterMethodHandle should not issue this");
    break;

  case _adapter_opt_spread_0:
  case _adapter_opt_spread_1:
  case _adapter_opt_spread_more:
    {
      // spread an array out into a group of arguments
      int length_constant = get_ek_adapter_opt_spread_info(ek);

      // find the address of the array argument
      __ movl(rax_argslot, rcx_amh_vmargslot);
      __ lea(rax_argslot, __ argument_address(rax_argslot));

      // grab some temps
      { __ push(rsi); __ push(rdi); }
      // (preceding pushes must be done after argslot address is taken!)
#define UNPUSH_RSI_RDI \
      { __ pop(rdi); __ pop(rsi); }

      // rax_argslot points both to the array and to the first output arg
      vmarg = Address(rax_argslot, 0);

      // Get the array value.
      Register  rsi_array       = rsi;
      Register  rdx_array_klass = rdx_temp;
      BasicType elem_type       = T_OBJECT;
      int       length_offset   = arrayOopDesc::length_offset_in_bytes();
      int       elem0_offset    = arrayOopDesc::base_offset_in_bytes(elem_type);
      __ movptr(rsi_array, vmarg);
      Label skip_array_check;
      if (length_constant == 0) {
        __ testptr(rsi_array, rsi_array);
        __ jcc(Assembler::zero, skip_array_check);
      }
      __ null_check(rsi_array, oopDesc::klass_offset_in_bytes());
      __ load_klass(rdx_array_klass, rsi_array);

      // Check the array type.
      Register rbx_klass = rbx_temp;
      __ load_heap_oop(rbx_klass, rcx_amh_argument); // this is a Class object!
      __ load_heap_oop(rbx_klass, Address(rbx_klass, java_lang_Class::klass_offset_in_bytes()));

      Label ok_array_klass, bad_array_klass, bad_array_length;
      __ check_klass_subtype(rdx_array_klass, rbx_klass, rdi, ok_array_klass);
      // If we get here, the type check failed!
      __ jmp(bad_array_klass);
      __ bind(ok_array_klass);

      // Check length.
      if (length_constant >= 0) {
        __ cmpl(Address(rsi_array, length_offset), length_constant);
      } else {
        Register rbx_vminfo = rbx_temp;
        __ movl(rbx_vminfo, rcx_amh_conversion);
        assert(CONV_VMINFO_SHIFT == 0, "preshifted");
        __ andl(rbx_vminfo, CONV_VMINFO_MASK);
        __ cmpl(rbx_vminfo, Address(rsi_array, length_offset));
      }
      __ jcc(Assembler::notEqual, bad_array_length);

      Register rdx_argslot_limit = rdx_temp;

      // Array length checks out.  Now insert any required stack slots.
      if (length_constant == -1) {
        // Form a pointer to the end of the affected region.
        __ lea(rdx_argslot_limit, Address(rax_argslot, Interpreter::stackElementSize));
        // 'stack_move' is negative number of words to insert
        Register rdi_stack_move = rdi;
        __ movl2ptr(rdi_stack_move, rcx_amh_conversion);
        __ sarptr(rdi_stack_move, CONV_STACK_MOVE_SHIFT);
        Register rsi_temp = rsi_array;  // spill this
        insert_arg_slots(_masm, rdi_stack_move, -1,
                         rax_argslot, rbx_temp, rsi_temp);
        // reload the array (since rsi was killed)
        __ movptr(rsi_array, vmarg);
      } else if (length_constant > 1) {
        int arg_mask = 0;
        int new_slots = (length_constant - 1);
        for (int i = 0; i < new_slots; i++) {
          arg_mask <<= 1;
          arg_mask |= _INSERT_REF_MASK;
        }
        insert_arg_slots(_masm, new_slots * stack_move_unit(), arg_mask,
                         rax_argslot, rbx_temp, rdx_temp);
      } else if (length_constant == 1) {
        // no stack resizing required
      } else if (length_constant == 0) {
        remove_arg_slots(_masm, -stack_move_unit(),
                         rax_argslot, rbx_temp, rdx_temp);
      }

      // Copy from the array to the new slots.
      // Note: Stack change code preserves integrity of rax_argslot pointer.
      // So even after slot insertions, rax_argslot still points to first argument.
      if (length_constant == -1) {
        // [rax_argslot, rdx_argslot_limit) is the area we are inserting into.
        Register rsi_source = rsi_array;
        __ lea(rsi_source, Address(rsi_array, elem0_offset));
        Label loop;
        __ bind(loop);
        __ movptr(rbx_temp, Address(rsi_source, 0));
        __ movptr(Address(rax_argslot, 0), rbx_temp);
        __ addptr(rsi_source, type2aelembytes(elem_type));
        __ addptr(rax_argslot, Interpreter::stackElementSize);
        __ cmpptr(rax_argslot, rdx_argslot_limit);
        __ jccb(Assembler::less, loop);
      } else if (length_constant == 0) {
        __ bind(skip_array_check);
        // nothing to copy
      } else {
        int elem_offset = elem0_offset;
        int slot_offset = 0;
        for (int index = 0; index < length_constant; index++) {
          __ movptr(rbx_temp, Address(rsi_array, elem_offset));
          __ movptr(Address(rax_argslot, slot_offset), rbx_temp);
          elem_offset += type2aelembytes(elem_type);
          slot_offset += Interpreter::stackElementSize;
        }
      }

      // Arguments are spread.  Move to next method handle.
      UNPUSH_RSI_RDI;
      __ load_heap_oop(rcx_recv, rcx_mh_vmtarget);
      __ jump_to_method_handle_entry(rcx_recv, rdx_temp);

      __ bind(bad_array_klass);
      UNPUSH_RSI_RDI;
      assert(!vmarg.uses(rarg2_required), "must be different registers");
      __ movptr(rarg2_required, Address(rdx_array_klass, java_mirror_offset));  // required type
      __ movptr(rarg1_actual,   vmarg);                                         // bad array
      __ movl(  rarg0_code,     (int) Bytecodes::_aaload);                      // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

      __ bind(bad_array_length);
      UNPUSH_RSI_RDI;
      assert(!vmarg.uses(rarg2_required), "must be different registers");
      __ mov   (rarg2_required, rcx_recv);                       // AMH requiring a certain length
      __ movptr(rarg1_actual,   vmarg);                          // bad array
      __ movl(  rarg0_code,     (int) Bytecodes::_arraylength);  // who is complaining?
      __ jump(ExternalAddress(from_interpreted_entry(_raise_exception)));

#undef UNPUSH_RSI_RDI
    }
    break;

  case _adapter_flyby:
  case _adapter_ricochet:
    __ unimplemented(entry_name(ek)); // %%% FIXME: NYI
    break;

  default:  ShouldNotReachHere();
  }
  __ hlt();

  address me_cookie = MethodHandleEntry::start_compiled_entry(_masm, interp_entry);
  __ unimplemented(entry_name(ek)); // %%% FIXME: NYI

  init_entry(ek, MethodHandleEntry::finish_compiled_entry(_masm, me_cookie));
}
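
Several adapter cases above read the AdapterMethodHandle conversion word twice: once for vminfo (the low, preshifted bits masked with CONV_VMINFO_MASK) and once for the signed stack move (high bits recovered with sarptr by CONV_STACK_MOVE_SHIFT). Below is a minimal C++ sketch of that decoding; the constant values are illustrative stand-ins, not the real definitions from methodHandles.hpp.

#include <cstdint>

// Stand-ins for CONV_VMINFO_MASK and CONV_STACK_MOVE_SHIFT; values illustrative only.
static const uint32_t kVmInfoMask     = 0xff;  // low, preshifted bits (CONV_VMINFO_SHIFT == 0)
static const int      kStackMoveShift = 20;

// vminfo: destination slot for swap/rotate, expected array length for spread,
// or the shift count used by the _adapter_opt_i2i narrowing path.
static uint32_t conversion_vminfo(uint32_t conv) {
  return conv & kVmInfoMask;                              // __ andl(reg, CONV_VMINFO_MASK)
}

// stack_move: signed stack adjustment (in words) kept in the high bits and
// recovered with an arithmetic right shift, as in the dup/drop/spread cases.
static int32_t conversion_stack_move(uint32_t conv) {
  return static_cast<int32_t>(conv) >> kStackMoveShift;   // __ sarptr(reg, CONV_STACK_MOVE_SHIFT)
}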
Example #19
address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
  const char *name = NULL;
  switch (type) {
    case T_BOOLEAN: name = "jni_fast_GetBooleanField"; break;
    case T_BYTE:    name = "jni_fast_GetByteField";    break;
    case T_CHAR:    name = "jni_fast_GetCharField";    break;
    case T_SHORT:   name = "jni_fast_GetShortField";   break;
    case T_INT:     name = "jni_fast_GetIntField";     break;
    case T_LONG:    name = "jni_fast_GetLongField";    break;
    default:        ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* blob = BufferBlob::create(name, BUFFER_SIZE);
  CodeBuffer cbuf(blob);
  MacroAssembler* masm = new MacroAssembler(&cbuf);
  address fast_entry = __ pc();

  Label slow;

  ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
  __ mov32 (rcounter, counter);
  __ mov   (robj, c_rarg1);
  __ testb (rcounter, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    __ xorptr(robj, rcounter);
    __ xorptr(robj, rcounter);                   // obj, since
                                                // robj ^ rcounter ^ rcounter == robj
                                                // robj is data dependent on rcounter.
  }

  __ clear_jweak_tag(robj);

  __ movptr(robj, Address(robj, 0));             // *obj
  __ mov   (roffset, c_rarg2);
  __ shrptr(roffset, 2);                         // offset

  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_BOOLEAN: __ movzbl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_BYTE:    __ movsbl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_CHAR:    __ movzwl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_SHORT:   __ movswl (rax, Address(robj, roffset, Address::times_1)); break;
    case T_INT:     __ movl   (rax, Address(robj, roffset, Address::times_1)); break;
    case T_LONG:    __ movq   (rax, Address(robj, roffset, Address::times_1)); break;
    default:        ShouldNotReachHere();
  }

  if (os::is_MP()) {
    __ lea(rcounter_addr, counter);
    // rcounter_addr is data dependent on rax.
    __ xorptr(rcounter_addr, rax);
    __ xorptr(rcounter_addr, rax);
    __ cmpl (rcounter, Address(rcounter_addr, 0));
  } else {
    __ cmp32 (rcounter, counter);
  }
  __ jcc (Assembler::notEqual, slow);

  __ ret (0);

  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  address slow_case_addr = NULL;
  switch (type) {
    case T_BOOLEAN: slow_case_addr = jni_GetBooleanField_addr(); break;
    case T_BYTE:    slow_case_addr = jni_GetByteField_addr();    break;
    case T_CHAR:    slow_case_addr = jni_GetCharField_addr();    break;
    case T_SHORT:   slow_case_addr = jni_GetShortField_addr();   break;
    case T_INT:     slow_case_addr = jni_GetIntField_addr();     break;
    case T_LONG:    slow_case_addr = jni_GetLongField_addr();
  }
  // tail call
  __ jump (ExternalAddress(slow_case_addr));

  __ flush ();

  return fast_entry;
}
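
The stub above is a lock-free fast path keyed off the safepoint counter: bail out to the slow path if the counter is odd (a safepoint, and possibly a GC, is in progress), speculatively load the field, then re-check that the counter has not moved; on MP systems the xor pairs only make the loads data-dependent on the counter so the CPU cannot reorder them around the counter reads. A minimal C++ sketch of the same protocol, skipping the handle dereference and jfieldID decoding, with illustrative names rather than real HotSpot symbols:

#include <atomic>
#include <cstdint>

// Illustrative stand-ins, not real HotSpot symbols.
extern std::atomic<uint32_t> safepoint_counter;           // odd while a safepoint is in progress
int32_t slow_get_int_field(void* obj, intptr_t offset);   // ordinary slow path

int32_t fast_get_int_field(void* obj, intptr_t offset) {
  uint32_t before = safepoint_counter.load(std::memory_order_acquire);
  if (before & 1) {
    return slow_get_int_field(obj, offset);    // safepoint in progress
  }
  // Speculative load of the field.
  int32_t value = *reinterpret_cast<const int32_t*>(
      reinterpret_cast<const char*>(obj) + offset);
  uint32_t after = safepoint_counter.load(std::memory_order_acquire);
  if (before != after) {
    return slow_get_int_field(obj, offset);    // a safepoint intervened; redo safely
  }
  return value;
}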
Example #20
    address generate_getPsrInfo() {
        // Flags to test CPU type.
        const uint32_t HS_EFL_AC           = 0x40000;
        const uint32_t HS_EFL_ID           = 0x200000;
        // Values for when we don't have a CPUID instruction.
        const int      CPU_FAMILY_SHIFT = 8;
        const uint32_t CPU_FAMILY_386   = (3 << CPU_FAMILY_SHIFT);
        const uint32_t CPU_FAMILY_486   = (4 << CPU_FAMILY_SHIFT);

        Label detect_486, cpu486, detect_586, std_cpuid1, std_cpuid4;
        Label sef_cpuid, ext_cpuid, ext_cpuid1, ext_cpuid5, ext_cpuid7, done;

        StubCodeMark mark(this, "VM_Version", "getPsrInfo_stub");
#   define __ _masm->

        address start = __ pc();

        //
        // void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
        //
        // LP64: rcx and rdx are first and second argument registers on windows

        __ push(rbp);
#ifdef _LP64
        __ mov(rbp, c_rarg0); // cpuid_info address
#else
        __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
#endif
        __ push(rbx);
        __ push(rsi);
        __ pushf();          // preserve rbx, and flags
        __ pop(rax);
        __ push(rax);
        __ mov(rcx, rax);
        //
        // if we are unable to change the AC flag, we have a 386
        //
        __ xorl(rax, HS_EFL_AC);
        __ push(rax);
        __ popf();
        __ pushf();
        __ pop(rax);
        __ cmpptr(rax, rcx);
        __ jccb(Assembler::notEqual, detect_486);

        __ movl(rax, CPU_FAMILY_386);
        __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
        __ jmp(done);

        //
        // If we are unable to change the ID flag, we have a 486 which does
        // not support the "cpuid" instruction.
        //
        __ bind(detect_486);
        __ mov(rax, rcx);
        __ xorl(rax, HS_EFL_ID);
        __ push(rax);
        __ popf();
        __ pushf();
        __ pop(rax);
        __ cmpptr(rcx, rax);
        __ jccb(Assembler::notEqual, detect_586);

        __ bind(cpu486);
        __ movl(rax, CPU_FAMILY_486);
        __ movl(Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())), rax);
        __ jmp(done);

        //
        // At this point, we have a chip which supports the "cpuid" instruction
        //
        __ bind(detect_586);
        __ xorl(rax, rax);
        __ cpuid();
        __ orl(rax, rax);
        __ jcc(Assembler::equal, cpu486);   // if cpuid doesn't support an input
                                            // value of at least 1, we give up and
                                            // assume a 486
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        __ cmpl(rax, 0xa);                  // Is cpuid(0xB) supported?
        __ jccb(Assembler::belowEqual, std_cpuid4);

        //
        // cpuid(0xB) Processor Topology
        //
        __ movl(rax, 0xb);
        __ xorl(rcx, rcx);   // Threads level
        __ cpuid();

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB0_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        __ movl(rax, 0xb);
        __ movl(rcx, 1);     // Cores level
        __ cpuid();
        __ push(rax);
        __ andl(rax, 0x1f);  // Determine if valid topology level
        __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
        __ andl(rax, 0xffff);
        __ pop(rax);
        __ jccb(Assembler::equal, std_cpuid4);

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB1_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        __ movl(rax, 0xb);
        __ movl(rcx, 2);     // Packages level
        __ cpuid();
        __ push(rax);
        __ andl(rax, 0x1f);  // Determine if valid topology level
        __ orl(rax, rbx);    // eax[4:0] | ebx[0:15] == 0 indicates invalid level
        __ andl(rax, 0xffff);
        __ pop(rax);
        __ jccb(Assembler::equal, std_cpuid4);

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::tpl_cpuidB2_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // cpuid(0x4) Deterministic cache params
        //
        __ bind(std_cpuid4);
        __ movl(rax, 4);
        __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x4) supported?
        __ jccb(Assembler::greater, std_cpuid1);

        __ xorl(rcx, rcx);   // L1 cache
        __ cpuid();
        __ push(rax);
        __ andl(rax, 0x1f);  // Determine if valid cache parameters used
        __ orl(rax, rax);    // eax[4:0] == 0 indicates invalid cache
        __ pop(rax);
        __ jccb(Assembler::equal, std_cpuid1);

        __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Standard cpuid(0x1)
        //
        __ bind(std_cpuid1);
        __ movl(rax, 1);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Check if OS has enabled XGETBV instruction to access XCR0
        // (OSXSAVE feature flag) and CPU supports AVX
        //
        __ andl(rcx, 0x18000000);
        __ cmpl(rcx, 0x18000000);
        __ jccb(Assembler::notEqual, sef_cpuid);

        //
        // XCR0, XFEATURE_ENABLED_MASK register
        //
        __ xorl(rcx, rcx);   // zero for XCR0 register
        __ xgetbv();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::xem_xcr0_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rdx);

        //
        // cpuid(0x7) Structured Extended Features
        //
        __ bind(sef_cpuid);
        __ movl(rax, 7);
        __ cmpl(rax, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset()))); // Is cpuid(0x7) supported?
        __ jccb(Assembler::greater, ext_cpuid);

        __ xorl(rcx, rcx);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::sef_cpuid7_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);

        //
        // Extended cpuid(0x80000000)
        //
        __ bind(ext_cpuid);
        __ movl(rax, 0x80000000);
        __ cpuid();
        __ cmpl(rax, 0x80000000);     // Is cpuid(0x80000001) supported?
        __ jcc(Assembler::belowEqual, done);
        __ cmpl(rax, 0x80000004);     // Is cpuid(0x80000005) supported?
        __ jccb(Assembler::belowEqual, ext_cpuid1);
        __ cmpl(rax, 0x80000006);     // Is cpuid(0x80000007) supported?
        __ jccb(Assembler::belowEqual, ext_cpuid5);
        __ cmpl(rax, 0x80000007);     // Is cpuid(0x80000008) supported?
        __ jccb(Assembler::belowEqual, ext_cpuid7);
        //
        // Extended cpuid(0x80000008)
        //
        __ movl(rax, 0x80000008);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Extended cpuid(0x80000007)
        //
        __ bind(ext_cpuid7);
        __ movl(rax, 0x80000007);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid7_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Extended cpuid(0x80000005)
        //
        __ bind(ext_cpuid5);
        __ movl(rax, 0x80000005);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // Extended cpuid(0x80000001)
        //
        __ bind(ext_cpuid1);
        __ movl(rax, 0x80000001);
        __ cpuid();
        __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
        __ movl(Address(rsi, 0), rax);
        __ movl(Address(rsi, 4), rbx);
        __ movl(Address(rsi, 8), rcx);
        __ movl(Address(rsi,12), rdx);

        //
        // return
        //
        __ bind(done);
        __ popf();
        __ pop(rsi);
        __ pop(rbx);
        __ pop(rbp);
        __ ret(0);

#   undef __

        return start;
    };
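
The stub saves flags and registers, detects 386/486 CPUs that lack cpuid, and then walks the cpuid leaves, each time guarding a leaf against the maximum supported value reported by cpuid(0). A rough C++ rendering of the topology part (leaf 0xB) using GCC/Clang's <cpuid.h>; the CpuidLeaf struct and function names are illustrative, not VM_Version APIs:

#include <cpuid.h>   // GCC/Clang cpuid helpers (__get_cpuid_count needs a fairly recent compiler)

// Illustrative container; VM_Version::CpuidInfo in HotSpot records many more leaves.
struct CpuidLeaf { unsigned int eax, ebx, ecx, edx; };

// Query one leaf/subleaf pair, mirroring the four movl stores into the
// rsi-relative buffer slots performed by the stub after each cpuid.
static bool query_leaf(unsigned int leaf, unsigned int subleaf, CpuidLeaf* out) {
  return __get_cpuid_count(leaf, subleaf, &out->eax, &out->ebx, &out->ecx, &out->edx) != 0;
}

// cpuid(0xB) processor topology: threads, cores and packages levels, guarded by
// the maximum standard leaf from cpuid(0), as in the std_cpuid0 check above.
void probe_cpu_topology(CpuidLeaf levels[3]) {
  CpuidLeaf leaf0;
  if (!query_leaf(0, 0, &leaf0) || leaf0.eax < 0xb) {
    return;   // cpuid(0xB) not supported on this CPU
  }
  for (unsigned int level = 0; level < 3; level++) {
    query_leaf(0xb, level, &levels[level]);
  }
}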
  address generate_verify_oop() {
    StubCodeMark mark(this, "StubRoutines", "verify_oop");
    address start = __ pc();
    
    // Incoming arguments on stack after saving eax:
    //
    // [tos    ]: saved edx
    // [tos + 1]: saved EFLAGS
    // [tos + 2]: return address
    // [tos + 3]: char* error message
    // [tos + 4]: oop   object to verify
    // [tos + 5]: saved eax - saved by caller and bashed
    
    Label exit, error;
    __ pushfd();
    __ incl(Address((int)StubRoutines::verify_oop_count_addr(), relocInfo::none));
    __ pushl(edx);                               // save edx
    // make sure object is 'reasonable'
    __ movl(eax, Address(esp, 4 * wordSize));    // get object
    __ testl(eax, eax);
    __ jcc(Assembler::zero, exit);               // if obj is NULL it is ok
    
    // Check if the oop is in the right area of memory
    const int oop_mask = Universe::verify_oop_mask();
    const int oop_bits = Universe::verify_oop_bits();
    __ movl(edx, eax);
    __ andl(edx, oop_mask);
    __ cmpl(edx, oop_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass is 'reasonable'
    __ movl(eax, Address(eax, oopDesc::klass_offset_in_bytes())); // get klass
    __ testl(eax, eax);
    __ jcc(Assembler::zero, error);              // if klass is NULL it is broken

    // Check if the klass is in the right area of memory
    const int klass_mask = Universe::verify_klass_mask();
    const int klass_bits = Universe::verify_klass_bits();
    __ movl(edx, eax);
    __ andl(edx, klass_mask);
    __ cmpl(edx, klass_bits);
    __ jcc(Assembler::notZero, error);

    // make sure klass' klass is 'reasonable'
    __ movl(eax, Address(eax, oopDesc::klass_offset_in_bytes())); // get klass' klass
    __ testl(eax, eax);
    __ jcc(Assembler::zero, error);              // if klass' klass is NULL it is broken

    __ movl(edx, eax);
    __ andl(edx, klass_mask);
    __ cmpl(edx, klass_bits);
    __ jcc(Assembler::notZero, error);           // if klass not in right area
                                                 // of memory it is broken too.

    // return if everything seems ok
    __ bind(exit);
    __ movl(eax, Address(esp, 5 * wordSize));    // get saved eax back
    __ popl(edx);                                // restore edx
    __ popfd();                                  // restore EFLAGS
    __ ret(3 * wordSize);                        // pop arguments

    // handle errors
    __ bind(error);
    __ movl(eax, Address(esp, 5 * wordSize));    // get saved eax back
    __ popl(edx);                                // get saved edx back
    __ popfd();                                  // get saved EFLAGS off stack -- will be ignored
    __ pushad();                                 // push registers (eip = return address & msg are already pushed)
    __ call(CAST_FROM_FN_PTR(address, MacroAssembler::debug), relocInfo::runtime_call_type);
    __ popad();
    __ ret(3 * wordSize);                        // pop arguments
    return start;
  }
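
Stripped of the register shuffling, the stub's test is a mask-and-compare of the oop, its klass, and the klass' klass against expected bit patterns. A condensed C++ sketch of that predicate, with placeholder constants standing in for the values Universe publishes at startup:

#include <cstdint>

// Placeholders; the real values come from Universe::verify_oop_mask()/bits()
// and Universe::verify_klass_mask()/bits().
static const uintptr_t oop_mask = 0,   oop_bits = 0;
static const uintptr_t klass_mask = 0, klass_bits = 0;

// The andl/cmpl pair in the stub, as a predicate.
static bool in_expected_region(uintptr_t p, uintptr_t mask, uintptr_t bits) {
  return (p & mask) == bits;
}

// NULL is accepted.  Otherwise the oop, its klass, and the klass' klass (both of
// which the stub loads from the object header) must all look like pointers into
// their expected regions; anything else falls through to MacroAssembler::debug.
bool oop_looks_reasonable(uintptr_t obj, uintptr_t klass, uintptr_t klass_klass) {
  if (obj == 0) return true;
  return in_expected_region(obj, oop_mask, oop_bits)
      && klass != 0       && in_expected_region(klass, klass_mask, klass_bits)
      && klass_klass != 0 && in_expected_region(klass_klass, klass_mask, klass_bits);
}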
Example #22
void NativeGenerator::generate_native_string_entries() {

  comment_section("Native entry points for string functions");
  {

    //--------------------java.lang.String.indexof0---------------------------
    rom_linkable_entry("native_string_indexof0_entry");

    wtk_profile_quick_call(/* param_size*/ 2);

    comment("Pop the return address");
    popl(edi);
    comment("Push zero for fromIndex");
    pushl(Constant(0));
    comment("Push back the return address");
    pushl(edi);

    jmp(Constant("native_string_indexof_entry"));
    rom_linkable_entry_end(); // native_string_indexof0_entry

    //--------------------java.lang.String.indexof---------------------------

    rom_linkable_entry("native_string_indexof_entry");
    Label cont, loop, test, failure, success;

    wtk_profile_quick_call(/* param_size*/ 3);

    comment("Pop the return address");
    popl(edi);

    comment("Pop the argument: fromIndex");
    pop_int(eax, eax);

    comment("Pop the argument: ch");
    pop_int(ebx, ebx);

    comment("Pop the receiver");
    pop_obj(ecx, ecx);

    cmpl(ebx, Constant(0xFFFF));
    jcc(greater, Constant(failure));

    cmpl(eax, Constant(0));
    jcc(greater_equal, Constant(cont));
    movl(eax, Constant(0));

    bind(cont);
    movl(esi, Address(ecx, Constant(String::count_offset())));

    comment("if (fromIndex >= count) { return -1; }");
    cmpl(eax, esi);
    jcc(greater_equal, Constant(failure));

    movl(edx, Address(ecx, Constant(String::offset_offset())));
    addl(eax, edx); // i = offset + fromIndex
    addl(edx, esi); // int max = offset + count;
    movl(esi, Address(ecx, Constant(String::value_offset())));    // v = value.
    jmp(Constant(test));

    bind(loop);
    cmpw(Address(esi, eax, times_2, Constant(Array::base_offset())),  ebx);
    jcc(equal, Constant(success));
    incl(eax);

    bind(test);
    cmpl(eax, edx);
    jcc(less, Constant(loop));

    comment("Return -1 by pushing the value and jumping to the return address");
    bind(failure);
    push_int(-1);
    jmp(edi);

    comment("Return i - offset by pushing the value and jumping to the return address");
    bind(success);
    movl(esi, Address(ecx, Constant(String::offset_offset())));   // reload offset
    subl(eax, esi);
    push_int(eax);
    jmp(edi);

    rom_linkable_entry_end(); // native_string_indexof_entry
  }

  //----------------------java.lang.String.charAt---------------------------

  {
    rom_linkable_entry("native_string_charAt_entry");
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }
    rom_linkable_entry_end();
  }

  //----------------------java.lang.String(java.lang.StringBuffer)-------------

  {
    rom_linkable_entry("native_string_init_entry");
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }
    rom_linkable_entry_end();
  }

  //----------------------java.lang.String.equals(java.lang.Object)------------

  {
    rom_linkable_entry("native_string_equals_entry");
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }
    rom_linkable_entry_end();
  }

  //----------------------java.lang.String.indexOf(java.lang.String)-----------

  {
    rom_linkable_entry("native_string_indexof0_string_entry");
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }
    rom_linkable_entry_end();
  }

  //----------------------java.lang.String.indexOf(java.lang.String)-----------

  {
    rom_linkable_entry("native_string_indexof_string_entry");
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }
    rom_linkable_entry_end();
  }

  //----------------------java.lang.String.compareTo---------------------------

  { // java.lang.String.compareTo
    // Method int compareTo(java.lang.String)

    rom_linkable_entry("native_string_compareTo_entry");

    wtk_profile_quick_call(/* param_size*/ 2);

    comment("preserve method");
    pushl(ebx);

    // 8 is return address plus pushed method
    int  str1_offset =  JavaFrame::arg_offset_from_sp(0) + 8,
         str0_offset =  JavaFrame::arg_offset_from_sp(1) + 8;

    comment("load arguments to registers");
    movl(ecx, Address(esp, Constant(str1_offset)));
    movl(eax, Address(esp, Constant(str0_offset)));

    // eax: str0: this String
    // ecx: str1: String to compare against

    Label bailout;

    comment("Null check");
    testl(ecx, ecx);
    jcc(zero, Constant(bailout));

    comment("get str0.value[]");
    movl(esi, Address(eax, Constant(String::value_offset())));
    comment("get str0.offset");
    movl(ebx, Address(eax, Constant(String::offset_offset())));
    comment("compute start of character data");
    leal(esi, Address(esi, ebx, times_2, Constant(Array::base_offset())));
    comment("get str0.count");
    movl(eax, Address(eax, Constant(String::count_offset())));

    comment("get str1.value[]");
    movl(edi, Address(ecx, Constant(String::value_offset())));
    comment("get str1.offset");
    movl(ebx, Address(ecx, Constant(String::offset_offset())));
    comment("compute start of character data");
    leal(edi, Address(edi, ebx, times_2, Constant(Array::base_offset())));
    comment("get str1.count");
    movl(ebx, Address(ecx, Constant(String::count_offset())));

    // esi = str0 start of character data
    // edi = str1 start of character data
    // eax = str0 length
    // ebx = str1 length

    Label str1_longest;
    subl(eax, ebx);
    jcc(greater_equal, Constant(str1_longest));
    // str1 is longer than str0
    addl(ebx, eax);
    bind(str1_longest);

    // esi = str0 start of character data
    // edi = str1 start of character data
    // eax = str0.count - str1.count
    // ebx = min(str0.count, str1.count)

    // save str0.count - str1.count, we might need it later
    pushl(eax);

    xorl(ecx, ecx);

    Label loop, check_lengths, done;
    bind(loop);
    cmpl(ecx, ebx);
    jcc(above_equal, Constant(check_lengths));
    movzxw(eax, Address(esi, ecx, times_2));
    movzxw(edx, Address(edi, ecx, times_2));
    subl(eax, edx);
    jcc(not_equal, Constant(done));
    incl(ecx);
    jmp(Constant(loop));

    bind(check_lengths);
    movl(eax, Address(esp));

    bind(done);
    popl(ebx); // remove saved length difference

    // Push result on stack and return to caller
    popl(ebx);     // remove method
    popl(edi);     // pop return address
    addl(esp, Constant(2 * BytesPerStackElement)); // remove arguments
    push_int(eax); // push result
    jmp(edi);      // return

    comment("Bail out to the general compareTo implementation");
    bind(bailout);
    comment("pop method");
    popl(ebx);
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }

    rom_linkable_entry_end(); // native_string_compareTo_entry
  }

  //----------------------java.lang.String.endsWith----------------

  {
    // java.lang.String.endsWith
    // Method boolean endsWith(java.lang.String)

    rom_linkable_entry("native_string_endsWith_entry");

    wtk_profile_quick_call(/* param_size*/ 2);

    Label bailout;

    // 4 is return address
    int suffix_offset =  JavaFrame::arg_offset_from_sp(0) + 4,
        this_offset =  JavaFrame::arg_offset_from_sp(1) + 4;

    comment("load arguments to registers");
    movl(eax, Address(esp, Constant(suffix_offset)));
    cmpl(eax, Constant(0));
    jcc(equal, Constant(bailout));

    movl(ecx, Address(esp, Constant(this_offset)));

    comment("Pop the return address");
    popl(edi);

    movl(edx, Address(ecx, Constant(String::count_offset())));
    subl(edx, Address(eax, Constant(String::count_offset())));

    comment("Push (this.count - suffix.count) for toffset");
    pushl(edx);

    comment("Push back the return address");
    pushl(edi);

    jmp(Constant("native_string_startsWith_entry"));

    comment("Bail out to the general startsWith implementation");
    bind(bailout);
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }

    rom_linkable_entry_end(); // native_string_endsWith_entry
  }

  //----------------------java.lang.String.startsWith----------------
  {
    // java.lang.String.startsWith
    // Method boolean startsWith(java.lang.String)
    rom_linkable_entry("native_string_startsWith0_entry");

    wtk_profile_quick_call(/* param_size*/ 2);
    Label bailout;

    // 4 is return address
    int prefix_offset = JavaFrame::arg_offset_from_sp(0) + 4;

    comment("Check if prefix is null");
    cmpl(Address(esp, Constant(prefix_offset)), Constant(0));
    jcc(equal, Constant(bailout));

    comment("Pop the return address");
    popl(edi);
    comment("Push zero for toffset");
    pushl(Constant(0));
    comment("Push back the return address");
    pushl(edi);

    jmp(Constant("native_string_startsWith_entry"));

    comment("Bail out to the general startsWith implementation");
    bind(bailout);
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }

    rom_linkable_entry_end(); // native_string_startsWith0_entry
  }

  {
    // ----------- java.lang.String.startsWith ------------------------------
    // Method boolean startsWith(java.lang.String,int)

    rom_linkable_entry("native_string_startsWith_entry");

    wtk_profile_quick_call(/* param_size*/ 3);

    Label bailout, return_false;

    // 4 is return address
    int  prefix_offset =  JavaFrame::arg_offset_from_sp(1) + 4;

    comment("Check if prefix is null");
    cmpl(Address(esp, Constant(prefix_offset)), Constant(0));
    jcc(equal, Constant(bailout));

    comment("Pop the return address");
    popl(edi);

    comment("Pop the argument: toffset");
    pop_int(edx, edx);

    comment("Pop the argument: prefix");
    pop_obj(eax, eax);

    comment("Pop the receiver");
    pop_obj(ecx, ecx);

    comment("Preserve the return address");
    pushl(edi);

    // ecx: this String
    // eax: prefix

    cmpl(edx, Constant(0));
    jcc(less, Constant(return_false));

    comment("if (toffset > this.count - prefix.count) return false;");
    movl(ebx, Address(ecx, Constant(String::count_offset())));
    subl(ebx, Address(eax, Constant(String::count_offset())));
    cmpl(edx, ebx);
    jcc(greater, Constant(return_false));

    comment("get this.value[]");
    movl(esi, Address(ecx, Constant(String::value_offset())));
    comment("get this.offset");
    movl(ebx, Address(ecx, Constant(String::offset_offset())));
    comment("add toffset");
    addl(ebx, edx);
    comment("compute start of character data");
    leal(esi, Address(esi, ebx, times_2, Constant(Array::base_offset())));

    comment("get prefix.value[]");
    movl(edi, Address(eax, Constant(String::value_offset())));
    comment("get prefix.offset");
    movl(ebx, Address(eax, Constant(String::offset_offset())));
    comment("compute start of character data");
    leal(edi, Address(edi, ebx, times_2, Constant(Array::base_offset())));

    comment("get prefix.count");
    movl(ecx, Address(eax, Constant(String::count_offset())));
    comment("get the number of bytes to compare");
    shll(ecx, Constant(1));

    comment("memcmp(edi, esi, ecx);");
    pushl(ecx);
    pushl(esi);
    pushl(edi);

    if (GenerateInlineAsm) {
      // VC++ treats memcmp() as an intrinsic function and would cause
      // reference to memcmp in Interpreter_i386.c to fail to compile.
      call(Constant("memcmp_from_interpreter"));
    } else {
      call(Constant("memcmp"));
    }
    addl(esp, Constant(12));
    cmpl(eax, Constant(0));
    jcc(not_equal, Constant(return_false));

    // Push 1 on stack and return to caller
    popl(edi);     // pop return address
    push_int(1);   // push result
    jmp(edi);      // return

    bind(return_false);
    // Push 0 on stack and return to caller
    popl(edi);     // pop return address
    push_int(0);   // push result
    jmp(edi);      // return

    comment("Bail out to the general startsWith implementation");
    bind(bailout);
    if (AddExternCUnderscore) {
      emit_instruction("jmp _interpreter_method_entry");
    } else {
      emit_instruction("jmp  interpreter_method_entry");
    }

    rom_linkable_entry_end(); // native_string_startsWith_entry
  }
}
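
// Aside, not generator output: a minimal C sketch of the startsWith fast path above,
// assuming 16-bit String characters and the same field meanings (value, offset,
// count, toffset) that the assembly reads. Names are illustrative only.
#include <string.h>   /* memcmp */

typedef unsigned short jchar_sketch;   /* assumed 16-bit char array element */

static int startsWith_fast_path(const jchar_sketch* this_value, int this_offset, int this_count,
                                const jchar_sketch* prefix_value, int prefix_offset, int prefix_count,
                                int toffset) {
  if (toffset < 0 || toffset > this_count - prefix_count) {
    return 0;                                                  /* same rejection tests as the stub */
  }
  const jchar_sketch* s = this_value + this_offset + toffset;  /* start of this character data */
  const jchar_sketch* p = prefix_value + prefix_offset;        /* start of prefix character data */
  /* the stub compares 2 * prefix.count bytes via memcmp */
  return memcmp(p, s, (size_t)prefix_count * sizeof(jchar_sketch)) == 0;
}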
  address generate_call_stub(address& return_address) {
    StubCodeMark mark(this, "StubRoutines", "call_stub");
    address start = __ pc();

    // stub code parameters / addresses
    assert(frame::entry_frame_call_wrapper_offset == 2, "adjust this code");
    bool  sse_save = false;
    const Address esp_after_call(ebp, -4 * wordSize); // same as in generate_catch_exception()!
    const Address mxcsr_save    (ebp, -4 * wordSize);
    const Address result        (ebp,  3 * wordSize);
    const Address result_type   (ebp,  4 * wordSize);
    const Address method        (ebp,  5 * wordSize);
    const Address entry_point   (ebp,  6 * wordSize);
    const Address parameters    (ebp,  7 * wordSize);
    const Address parameter_size(ebp,  8 * wordSize);
    const Address thread        (ebp,  9 * wordSize); // same as in generate_catch_exception()!
#ifdef COMPILER2
    sse_save =  VM_Version::supports_sse();
#endif

    // stub code
    __ enter();    

    // save edi, esi, & ebx, according to C calling conventions
    __ pushl(edi);
    __ pushl(esi);
    __ pushl(ebx);
    __ subl(esp, wordSize);  // space for %mxcsr save
    // save and initialize %mxcsr
    if (sse_save) {
      __ stmxcsr(mxcsr_save);
      __ ldmxcsr(Address((int) StubRoutines::addr_mxcsr_std(), relocInfo::none));
    }

#ifdef ASSERT
    // make sure we have no pending exceptions
    { Label L;
      __ movl(ecx, thread);
      __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::call_stub: entered with pending exception");
      __ bind(L);
    }
#endif

    // pass parameters if any
    Label parameters_done;
    __ movl(ecx, parameter_size);  // parameter counter
    __ testl(ecx, ecx);
    __ jcc(Assembler::zero, parameters_done);

    // parameter passing loop

    Label loop;
    __ movl(edx, parameters);	       // parameter pointer
    __ movl(esi, ecx);                 // parameter counter is in esi now
    __ movl(ecx,  Address(edx));       // get first parameter in case it is a receiver

    __ bind(loop);
    __ movl(eax, Address(edx));	       // get parameter
    __ addl(edx, wordSize);            // advance to next parameter
    __ decl(esi);                      // decrement counter
    __ pushl(eax);                     // pass parameter
    __ jcc(Assembler::notZero, loop);

    // call Java function
    __ bind(parameters_done);
    __ movl(ebx, method);              // get methodOop
    __ movl(esi, entry_point);         // get entry_point
    __ call(esi, relocInfo::none);
    return_address = __ pc();

    // store result depending on type
    // (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
    __ movl(edi, result);
    Label is_long, is_float, is_double, exit;
    __ movl(esi, result_type);
    __ cmpl(esi, T_LONG);
    __ jcc(Assembler::equal, is_long);
    __ cmpl(esi, T_FLOAT);
    __ jcc(Assembler::equal, is_float);
    __ cmpl(esi, T_DOUBLE);
    __ jcc(Assembler::equal, is_double);

    // handle T_INT case
    __ movl(Address(edi), eax);
    __ bind(exit);

    // pop parameters
    __ movl(ecx, parameter_size);
    __ leal(esp, Address(esp, ecx, Address::times_4));

    // check if parameters have been popped correctly
#ifdef ASSERT
      Label esp_wrong;
      __ leal(edi, esp_after_call);
      __ cmpl(esp, edi);
      __ jcc(Assembler::notEqual, esp_wrong);
#endif

    // restore %mxcsr
    if (sse_save) {
      __ ldmxcsr(mxcsr_save);
    }
    // restore ebx, esi & edi
    __ addl(esp, wordSize);  // remove %mxcsr save area
    __ popl(ebx);
    __ popl(esi);
    __ popl(edi);    

    // return
    __ popl(ebp);
    __ ret(0);

    // handle return types different from T_INT
    __ bind(is_long);
    __ movl(Address(edi, 0 * wordSize), eax);
    __ movl(Address(edi, 1 * wordSize), edx);
    __ jmp(exit);

    __ bind(is_float);
    __ fstp_s(Address(edi));
    __ jmp(exit);

    __ bind(is_double);
    __ fstp_d(Address(edi));
    __ jmp(exit);

#ifdef ASSERT
      // stack pointer misadjusted
      __ bind(esp_wrong);
      __ stop("esp wrong after Java call");
#endif

    return start;
  }
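
// Aside, not stub code: a hedged C++ sketch of how the tail of the call stub
// above stores the returned value, assuming eax/edx carry integer results and
// the FPU top-of-stack carries floating-point results (as on 32-bit x86).
// SketchType and store_result_sketch are illustrative names only.
enum SketchType { SKETCH_INT, SKETCH_LONG, SKETCH_FLOAT, SKETCH_DOUBLE };

static void store_result_sketch(void* result, SketchType result_type,
                                int eax, int edx, double fpu_top) {
  switch (result_type) {
    case SKETCH_LONG:                       // low word in eax, high word in edx
      ((int*)result)[0] = eax;
      ((int*)result)[1] = edx;
      break;
    case SKETCH_FLOAT:                      // fstp_s: store FPU top as a 32-bit float
      *(float*)result = (float)fpu_top;
      break;
    case SKETCH_DOUBLE:                     // fstp_d: store FPU top as a 64-bit double
      *(double*)result = fpu_top;
      break;
    default:                                // everything else is treated as an int
      *(int*)result = eax;
      break;
  }
}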
Example #24
void NativeGenerator::generate_native_system_entries() {
  comment_section("Native entry points for system functions");

  rom_linkable_entry("native_jvm_unchecked_byte_arraycopy_entry");
  jmp(Constant("native_system_arraycopy_entry"));
  rom_linkable_entry_end();

  rom_linkable_entry("native_jvm_unchecked_char_arraycopy_entry");
  jmp(Constant("native_system_arraycopy_entry"));
  rom_linkable_entry_end();

  rom_linkable_entry("native_jvm_unchecked_int_arraycopy_entry");
  jmp(Constant("native_system_arraycopy_entry"));
  rom_linkable_entry_end();

  rom_linkable_entry("native_jvm_unchecked_long_arraycopy_entry");
  jmp(Constant("native_system_arraycopy_entry"));
  rom_linkable_entry_end();

  rom_linkable_entry("native_jvm_unchecked_obj_arraycopy_entry");
  jmp(Constant("native_system_arraycopy_entry"));
  rom_linkable_entry_end();

  rom_linkable_entry("native_system_arraycopy_entry");

  wtk_profile_quick_call(/* param_size*/ 5);

  Label bailout, cont, try_2_byte, try_4_byte, try_8_byte, do_4_byte;

  //  public static native void arraycopy(Object src, int src_position,
  //                                      Object dst, int dst_position,
  //                                      int length);
  comment("preserve method");
  pushl(ebx);

  // 8 is for the preserved method and the return address
  int  length_offset  =  JavaFrame::arg_offset_from_sp(0) + 8,
       dst_pos_offset =  JavaFrame::arg_offset_from_sp(1) + 8,
       dst_offset     =  JavaFrame::arg_offset_from_sp(2) + 8,
       src_pos_offset =  JavaFrame::arg_offset_from_sp(3) + 8,
       src_offset     =  JavaFrame::arg_offset_from_sp(4) + 8;

  comment("load arguments to registers");
  movl(ecx, Address(esp, Constant(length_offset)));
  movl(edi, Address(esp, Constant(dst_pos_offset)));
  movl(edx, Address(esp, Constant(dst_offset)));
  movl(esi, Address(esp, Constant(src_pos_offset)));
  movl(eax, Address(esp, Constant(src_offset)));

  // eax = src
  // ebx = tmp register
  // edx = dst
  // ecx = length
  // esi = src_pos
  // edi = dst_pos

  comment("if (src == NULL) goto bailout;");
  testl( eax, eax );
  jcc(zero, Constant(bailout));

  comment("if (dst == NULL) goto bailout;");
  testl( edx, edx );
  jcc(zero, Constant(bailout));

  comment("if (length < 0 || src_pos < 0 || dst_pos < 0) goto bailout;");
  movl(ebx, ecx);
  orl(ebx, esi);
  orl(ebx, edi);
  jcc(negative, Constant(bailout));

  comment("if ((unsigned int) dst.length < (unsigned int) dst_pos + (unsigned int) length) goto bailout;");
  movl(ebx, ecx);
  addl(ebx, edi);
  cmpl(Address(edx, Constant(Array::length_offset())), ebx);
  jcc(below, Constant(bailout));

  comment("if ((unsigned int) src.length < (unsigned int) src_pos + (unsigned int) length) goto bailout;");
  movl(ebx, ecx);
  addl(ebx, esi);
  cmpl(Address(eax, Constant(Array::length_offset())), ebx);
  jcc(below, Constant(bailout));

  comment("Same near test");
  comment("if (src.near != dst.near) goto bailout;");
  movl(ebx, Address(eax, Constant(Oop::klass_offset())));
  cmpl(ebx, Address(edx, Constant(Oop::klass_offset())));
  jcc(not_equal, Constant(bailout));

  comment("load the instance_size");
  movl(ebx, Address(ebx, Constant(JavaNear::klass_offset())));
  movsxw(ebx, Address(ebx, Constant(FarClass::instance_size_offset())));

  comment("if (instance_size != size_type_array_1()) goto try_2_byte");
  cmpl(ebx, Constant(InstanceSize::size_type_array_1));
  jcc(not_equal, Constant(try_2_byte));
  leal(esi, Address(eax, esi, times_1, Constant(Array::base_offset())));
  leal(edi, Address(edx, edi, times_1, Constant(Array::base_offset())));
  jmp(Constant(cont));

  bind(try_2_byte);
  comment("if (instance_size != size_type_array_2()) goto try_4_byte");
  cmpl(ebx, Constant(InstanceSize::size_type_array_2));
  jcc(not_equal, Constant(try_4_byte));
  leal(esi, Address(eax, esi, times_2, Constant(Array::base_offset())));
  leal(edi, Address(edx, edi, times_2, Constant(Array::base_offset())));
  shll(ecx, Constant(1));
  jmp(Constant(cont));

  bind(try_4_byte);
  comment("if (instance_size == size_type_array_4()) goto do_4_byte");
  cmpl(ebx, Constant(InstanceSize::size_type_array_4));
  jcc(equal, Constant(do_4_byte) );

  comment("if (instance_size != size_obj_array()) goto bailout");
  cmpl(ebx, Constant(InstanceSize::size_obj_array));
  jcc(not_equal, Constant(bailout));

  comment("if (dst < old_generation_end) goto bailout");
  cmpl( edx, Address( Constant( "_old_generation_end" ) ) );
  jcc( below, Constant(bailout));

  bind(do_4_byte);
  leal(esi, Address(eax, esi, times_4, Constant(Array::base_offset())));
  leal(edi, Address(edx, edi, times_4, Constant(Array::base_offset())));
  shll(ecx, Constant(2));

  bind(cont);
  comment("memmove(edi, esi, ecx);");
  pushl(ecx);
  pushl(esi);
  pushl(edi);
  call(Constant("memmove"));
  addl(esp, Constant(16));

  ret(Constant(5 * BytesPerStackElement));

  comment("Bail out to the general arraycopy implementation");
  bind(bailout);
  comment("pop method");
  popl(ebx);

  if (AddExternCUnderscore) {
    emit_instruction("jmp _interpreter_method_entry");
  } else {
    emit_instruction("jmp  interpreter_method_entry");
  }

  rom_linkable_entry_end(); // native_system_arraycopy_entry
}
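
// Aside, not generator output: a simplified C sketch of the guard sequence and
// copy emitted above. The "same near" test and the old-generation check for
// object arrays are omitted; element_size plays the role the instance_size
// dispatch (times_1/times_2/times_4) plays in the assembly. Names are illustrative only.
#include <string.h>   /* memmove */

static int arraycopy_fast_path(unsigned char* src_data, unsigned src_len,
                               unsigned char* dst_data, unsigned dst_len,
                               int src_pos, int dst_pos, int length,
                               int element_size /* 1, 2 or 4 */) {
  if (src_data == 0 || dst_data == 0) return 0;         /* null checks -> bailout */
  if ((src_pos | dst_pos | length) < 0) return 0;       /* same or/jcc(negative) sign test */
  if (dst_len < (unsigned)(dst_pos + length)) return 0; /* dst bounds (unsigned compare) */
  if (src_len < (unsigned)(src_pos + length)) return 0; /* src bounds (unsigned compare) */
  memmove(dst_data + (size_t)dst_pos * element_size,
          src_data + (size_t)src_pos * element_size,
          (size_t)length * element_size);
  return 1;                                             /* fast path taken */
}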
Example #25
int cmph(const void *a, const void *b)
{
	return cmpl(b,a);
}
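
// Aside: assuming cmpl() above is an ascending qsort-style comparator, swapping
// the arguments as cmph() does inverts the sort order. A hypothetical,
// self-contained illustration (cmpl_example/cmph_example are stand-ins):
#include <stdio.h>
#include <stdlib.h>

static int cmpl_example(const void* a, const void* b) {
  long x = *(const long*)a, y = *(const long*)b;
  return (x > y) - (x < y);          /* ascending */
}

static int cmph_example(const void* a, const void* b) {
  return cmpl_example(b, a);         /* swapped arguments: descending */
}

int main(void) {
  long v[] = { 3, 1, 2 };
  qsort(v, 3, sizeof v[0], cmph_example);
  printf("%ld %ld %ld\n", v[0], v[1], v[2]);   /* prints: 3 2 1 */
  return 0;
}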
address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
  const char *name;
  switch (type) {
    case T_FLOAT:     name = "jni_fast_GetFloatField";     break;
    case T_DOUBLE:    name = "jni_fast_GetDoubleField";    break;
    default:          ShouldNotReachHere();
  }
  ResourceMark rm;
  BufferBlob* b = BufferBlob::create(name, BUFFER_SIZE);
  address fast_entry = b->instructions_begin();
  CodeBuffer cbuf(fast_entry, b->instructions_size());
  MacroAssembler* masm = new MacroAssembler(&cbuf);

  Label slow;

  ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
  __ mov32 (rcounter, counter);
  __ mov   (robj, c_rarg1);
  __ testb (rcounter, 1);
  __ jcc (Assembler::notZero, slow);
  if (os::is_MP()) {
    __ xorptr(robj, rcounter);
    __ xorptr(robj, rcounter);                   // obj, since
                                                // robj ^ rcounter ^ rcounter == robj
                                                // robj is data dependent on rcounter.
  }
  __ movptr(robj, Address(robj, 0));             // *obj
  __ mov   (roffset, c_rarg2);
  __ shrptr(roffset, 2);                         // offset

  assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
  speculative_load_pclist[count] = __ pc();
  switch (type) {
    case T_FLOAT:  __ movflt (xmm0, Address(robj, roffset, Address::times_1)); break;
    case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break;
    default:        ShouldNotReachHere();
  }

  if (os::is_MP()) {
    __ lea(rcounter_addr, counter);
    __ movdq (rax, xmm0);
    // counter address is data dependent on xmm0.
    __ xorptr(rcounter_addr, rax);
    __ xorptr(rcounter_addr, rax);
    __ cmpl (rcounter, Address(rcounter_addr, 0));
  } else {
    __ cmp32 (rcounter, counter);
  }
  __ jcc (Assembler::notEqual, slow);

  __ ret (0);

  slowcase_entry_pclist[count++] = __ pc();
  __ bind (slow);
  address slow_case_addr;
  switch (type) {
    case T_FLOAT:     slow_case_addr = jni_GetFloatField_addr();  break;
    case T_DOUBLE:    slow_case_addr = jni_GetDoubleField_addr(); break;
    default:          ShouldNotReachHere();
  }
  // tail call
  __ jump (ExternalAddress(slow_case_addr));

  __ flush ();

  return fast_entry;
}
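
// Aside, not HotSpot code: a hedged C++ sketch of the speculative-read protocol
// the stub above wraps around the safepoint counter. The shift by 2 assumes the
// jfieldID encodes the byte offset shifted left by two, as the shrptr(roffset, 2)
// suggests; names here are illustrative only.
static double fast_get_double_sketch(volatile int* safepoint_counter,
                                     const char* obj, int encoded_offset,
                                     double (*slow_path)(const char*, int)) {
  int before = *safepoint_counter;
  if (before & 1) {                                              // odd: a safepoint is in progress
    return slow_path(obj, encoded_offset);
  }
  double value = *(const double*)(obj + (encoded_offset >> 2));  // speculative load
  if (*safepoint_counter != before) {                            // counter moved: value may be stale
    return slow_path(obj, encoded_offset);
  }
  return value;
}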
void OptoRuntime::generate_exception_blob() {

  // Capture info about frame layout
  enum layout {
    thread_off,                 // last_java_sp
    // The frame sender code expects that rbp will be in the "natural" place and
    // will override any oopMap setting for it. We must therefore force the layout
    // so that it agrees with the frame sender code.
    rbp_off,
    return_off,                 // slot for return address
    framesize
  };

  // allocate space for the code
  ResourceMark rm;
  // setup code generation tools
  CodeBuffer   buffer("exception_blob", 512, 512);
  MacroAssembler* masm = new MacroAssembler(&buffer);

  OopMapSet *oop_maps = new OopMapSet();

  address start = __ pc();

  __ push(rdx);
  __ subptr(rsp, return_off * wordSize);   // Prolog!

  // rbp, location is implicitly known
  __ movptr(Address(rsp,rbp_off  *wordSize), rbp);

  // Store exception in Thread object. We cannot pass any arguments to the
  // handle_exception call, since we do not want to make any assumption
  // about the size of the frame where the exception happened in.
  __ get_thread(rcx);
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()),  rdx);

  // This call does all the hard work.  It checks if an exception handler
  // exists in the method.
  // If so, it returns the handler address.
  // If not, it prepares for stack-unwinding, restoring the callee-save
  // registers of the frame being removed.
  //
  __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
  __ set_last_Java_frame(rcx, noreg, noreg, NULL);

  __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));

  // No registers to map, rbp is known implicitly
  oop_maps->add_gc_map( __ pc() - start,  new OopMap( framesize, 0 ));
  __ get_thread(rcx);
  __ reset_last_Java_frame(rcx, false, false);

  // Restore callee-saved registers
  __ movptr(rbp, Address(rsp, rbp_off * wordSize));

  __ addptr(rsp, return_off * wordSize);   // Epilog!
  __ pop(rdx); // Exception pc

  // rax: exception handler for given <exception oop/exception pc>

  // Restore SP from BP if the exception PC is a MethodHandle call.
  __ cmpl(Address(rcx, JavaThread::is_method_handle_exception_offset()), 0);
  __ cmovptr(Assembler::notEqual, rsp, rbp);

  // We have a handler in rax, (could be deopt blob)
  // rdx - throwing pc, deopt blob will need it.

  __ push(rax);

  // Get the exception
  __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
  // Get the exception pc in case we are deoptimized
  __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT
  __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), NULL_WORD);
  __ movptr(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
#endif
  // Clear the exception oop so GC no longer processes it as a root.
  __ movptr(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);

  __ pop(rcx);

  // rax: exception oop
  // rcx: exception handler
  // rdx: exception pc
  __ jmp (rcx);

  // -------------
  // make sure all code is generated
  masm->flush();

  _exception_blob = ExceptionBlob::create(&buffer, oop_maps, framesize);
}
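
// Aside, not VM code: a hedged sketch of the hand-off the blob above performs,
// with ThreadStateSketch and FindHandlerFn standing in for the JavaThread
// exception fields and OptoRuntime::handle_exception_C.
struct ThreadStateSketch {
  void* exception_oop;
  void* exception_pc;
};

typedef void* (*FindHandlerFn)(ThreadStateSketch*);   // returns the handler address

static void* dispatch_exception_sketch(ThreadStateSketch* thread, void* exception_oop,
                                       void* throwing_pc, FindHandlerFn find_handler) {
  thread->exception_oop = exception_oop;   // stash oop and pc where the runtime can find them
  thread->exception_pc  = throwing_pc;
  void* handler = find_handler(thread);    // may be a regular handler or the deopt blob
  thread->exception_oop = 0;               // clear so GC no longer treats it as a root
  // the generated blob then jumps to the handler with the oop in rax and the pc in rdx
  return handler;
}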
// used by compiler only; may use only caller saved registers eax, ebx, ecx.
// edx holds first int arg, esi, edi, ebp are callee-save & must be preserved.
// Leave receiver in ecx; required behavior when +OptoArgsInRegisters
// is modified to put the first oop in ecx.
//
// NOTE: If this code is used by the C1, the receiver_location is always 0.
VtableStub* VtableStubs::create_vtable_stub(int vtable_index, int receiver_location) {
  const int i486_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index, receiver_location);
  ResourceMark rm;
  MacroAssembler* masm = new MacroAssembler(new CodeBuffer(s->entry_point(), i486_code_length));

#ifndef PRODUCT
#ifdef COMPILER2
  if (CountCompiledCalls) __ incl(Address((int)OptoRuntime::nof_megamorphic_calls_addr(), relocInfo::none));
#endif
#endif

  // get receiver (need to skip return address on top of stack)
#ifdef COMPILER1
  assert(receiver_location == 0, "receiver is always in ecx - no location info needed");
#else
  if( receiver_location < SharedInfo::stack0 ) {
    assert(receiver_location == ECX_num, "receiver expected in ecx");
  } else {
    __ movl(ecx, Address(esp, SharedInfo::reg2stack(OptoReg::Name(receiver_location)) * wordSize+wordSize/*skip return address*/));
  }
#endif
  // get receiver klass
  address npe_addr = __ pc();
  __ movl(eax, Address(ecx, oopDesc::klass_offset_in_bytes()));
  // compute entry offset (in words)
  int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
  if (DebugVtables) { 
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(eax, instanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(ebx, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), ecx, ebx);
    __ bind(L);
  }
#endif // PRODUCT
  // load methodOop and target address
#ifdef COMPILER1
  __ movl(ebx, Address(eax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
  address ame_addr = __ pc();
  __ movl(edx, Address(ebx, methodOopDesc::from_compiled_code_entry_point_offset()));
  if (DebugVtables) {
    Label L;
    __ testl(edx, edx);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }
  // eax: receiver klass
  // ebx: methodOop
  // ecx: receiver
  // edx: entry point
  __ jmp(edx);
#else
  __ movl(eax, Address(eax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
  address ame_addr = __ pc();
  __ movl(ebx, Address(eax, methodOopDesc::from_compiled_code_entry_point_offset()));  

  if (DebugVtables) {
    Label L;
    __ testl(ebx, ebx);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  } 


  // jump to target (either compiled code or c2iadapter)
  // eax: methodOop (in case we call c2iadapter)
  __ jmp(ebx);
#endif // COMPILER1

  masm->flush();
  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
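
// Aside, not VM code: a hedged sketch of the dispatch the vtable stub encodes,
// using hypothetical KlassSketch/MethodSketch layouts in place of the real
// klassOop/methodOop offsets read by the generated code.
struct MethodSketch { void* from_compiled_entry; };
struct KlassSketch  { MethodSketch** vtable; };   // real code indexes from vtable_start_offset

static void* vtable_dispatch_sketch(void** receiver, int vtable_index) {
  KlassSketch* k = (KlassSketch*)receiver[0];     // load the receiver's klass (offset 0 assumed here)
  MethodSketch* m = k->vtable[vtable_index];      // methodOop held in the selected vtable entry
  return m->from_compiled_entry;                  // jump target: compiled code or the c2i adapter
}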