Code example #1
File: methodHandles_x86.cpp Project: pombreda/graal
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
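  // Not an exact match: also accept an obj whose klass holds the expected
  // klass at its super_check_offset (the fast path of the subtype check).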
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}
Code example #2
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
    const int aligned_mask = BytesPerWord - 1;
    const int hdr_offset = oopDesc::mark_offset_in_bytes();
    assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
    assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
    Label done;

    if (UseBiasedLocking) {
        // load object
        movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
        biased_locking_exit(obj, hdr, done);
    }

    // load displaced header
    movptr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is NULL we had recursive locking
    testptr(hdr, hdr);
    // if we had recursive locking, we are done
    jcc(Assembler::zero, done);
    if (!UseBiasedLocking) {
        // load object
        movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    }
    verify_oop(obj);
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
    cmpxchgptr(hdr, Address(obj, hdr_offset));
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
    jcc(Assembler::notEqual, slow_case);
    // done
    bind(done);
}
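A rough C++ analogue of the fast path above, as a sketch only: it assumes the stack-locking layout the code implies (mark word at object offset 0, a lock record whose first word holds the displaced header), and Object, BasicObjectLock, and fast_unlock are illustrative names rather than VM types.

#include <atomic>

struct Object { std::atomic<void*> mark; /* instance fields follow */ };
struct BasicObjectLock { void* displaced_header; Object* obj; };

// Mirrors the cmpxchg above: if the object's mark word still points at our
// lock record, restore the displaced header; otherwise report failure so
// the caller can take the runtime path (slow_case).
bool fast_unlock(BasicObjectLock* lock) {
    void* hdr = lock->displaced_header;
    if (hdr == nullptr) return true;  // recursive lock: nothing to restore
    void* expected = lock;            // mark word should hold the lock record address
    return lock->obj->mark.compare_exchange_strong(expected, hdr);
}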
Code example #3
void CompilerStubs::generate_compiler_new_object() {
  comment_section("Compiler new object (any size)");
  comment("Register edx holds the instance size, register ebx holds the prototypical near of the instance class");
  Label slow_case;
  entry("compiler_new_object");

  comment("Get _inline_allocation_top");
  movl(eax, Address(Constant("_inline_allocation_top")));

  comment("Compute new top");
  leal(ecx, Address(eax, edx, times_1));

  if (GenerateDebugAssembly) {
    comment("Check ExcessiveGC");
    testl(Address(Constant("ExcessiveGC")), Constant(0));
    jcc(not_zero, Constant(slow_case));
  }

  comment("Compare against _inline_allocation_end");
  cmpl(ecx, Address(Constant("_inline_allocation_end")));
  jcc(above, Constant(slow_case));

  comment("Allocation succeeded, set _inline_allocation_top");
  movl(Address(Constant("_inline_allocation_top")), ecx);

  comment("Set prototypical near in object; no need for write barrier");
  movl(Address(eax), ebx);

  comment("Compute remaining size");
  decrement(edx, oopSize);

  comment("One-word object?");
  Label init_done;
  jcc(zero, Constant(init_done));

  comment("Zero object fields");
  xorl(ecx, ecx);
  Label init_loop;
  bind(init_loop);
  movl(Address(eax, edx, times_1), ecx);
  decrement(edx, oopSize);
  jcc(not_zero, Constant(init_loop));
  bind(init_done);

  comment("The newly allocated object is in register eax");
  ret();

  comment("Slow case - call the VM runtime system");
  bind(slow_case);
  leal(eax, Address(Constant("newobject")));
  goto_shared_call_vm(T_OBJECT);

  entry_end(); // compiler_new_object
}
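The stub above is a bump-pointer allocator. Below is a minimal C++ sketch of the same fast path; inline_allocation_top and inline_allocation_end stand in for the globals the stub references by name, and everything else here is illustrative.

#include <cstddef>
#include <cstring>

static char* inline_allocation_top;
static char* inline_allocation_end;

// Returns nullptr for the slow case, where the stub calls the runtime.
void* allocate_object(std::size_t size, void* prototypical_near) {
    char* obj = inline_allocation_top;
    char* new_top = obj + size;                     // leal(ecx, ...)
    if (new_top > inline_allocation_end)            // cmpl + jcc(above, ...)
        return nullptr;
    inline_allocation_top = new_top;                // commit the allocation
    std::memcpy(obj, &prototypical_near, sizeof(void*));       // set near pointer
    std::memset(obj + sizeof(void*), 0, size - sizeof(void*)); // zero the fields
    return obj;
}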
Code example #4
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(ecx);
      __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into ebx
    __ movl(eax, Address(esp));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
    __ movl(ebx, eax);

    // setup eax & edx, remove return address & clear pending exception
    __ get_thread(ecx);
    __ popl(edx);
    __ movl(eax, Address(ecx, Thread::pending_exception_offset()));
    __ movl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testl(eax, eax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // eax: exception
    // ebx: exception handler
    // edx: throwing pc
    __ verify_oop(eax);
    __ jmp(ebx);

    return start;
  }
Code example #5
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address InterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // rsi: senderSP; must be preserved for the slow path, set SP to it on the fast path
    // rdx: scratch
    // rdi: scratch

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register buf = rdx;  // source java byte array address
    const Register len = rdi;  // length

    // value              x86_32
    // interp. arg ptr    ESP + 4
    // int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
    //                                         3           2      1        0
    // int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
    //                                              4         2,3      1        0

    // Arguments are reversed on java expression stack
    __ movl(len,   Address(rsp,   4 + 0)); // Length
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // long buf
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc,   Address(rsp, 4 + 4 * wordSize)); // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 4 + 2 * wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ addptr(buf, Address(rsp, 4 + 1 * wordSize)); // + offset
      __ movl(crc,   Address(rsp, 4 + 3 * wordSize)); // Initial CRC
    }

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, rsi);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
Code example #6
address InterpreterGenerator::generate_empty_entry(void) {

  // rbx: methodOop
  // rcx: receiver (unused)
  // rsi: previous interpreter state (C++ interpreter), must be preserved
  // rsi: sender sp, must set sp to this value on return

  if (!UseFastEmptyMethods) return NULL;

  address entry_point = __ pc();

  // If we need a safepoint check, generate full interpreter entry.
  Label slow_path;
  ExternalAddress state(SafepointSynchronize::address_of_state());
  __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
           SafepointSynchronize::_not_synchronized);
  __ jcc(Assembler::notEqual, slow_path);

  // do nothing for empty methods (do not even increment invocation counter)
  // Code: _return
  // _return
  // return w/o popping parameters
  __ pop(rax);
  __ mov(rsp, rsi);
  __ jmp(rax);

  __ bind(slow_path);
  (void) generate_normal_entry(false);
  return entry_point;
}
Code example #7
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address esp_after_call(ebp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (ebp,  9 * wordSize); // same as in generate_call_stub()!
    address start = __ pc();

    // get thread directly
    __ movl(ecx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(ebx);
      __ cmpl(ebx, ecx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(eax);
    __ movl(Address(ecx, Thread::pending_exception_offset()), eax          );
    __ movl(Address(ecx, Thread::exception_file_offset   ()), (int)__FILE__);
    __ movl(Address(ecx, Thread::exception_line_offset   ()),      __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jmp(StubRoutines::_call_stub_return_address, relocInfo::none);

    return start;
  }
Code example #8
// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
  Label done;
  assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
  assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
  Register index = len_in_bytes;
  // index is positive and ptr sized
  subptr(index, hdr_size_in_bytes);
  jcc(Assembler::zero, done);
  // initialize topmost word, divide index by 2, check if odd and test if zero
  // note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
  { Label L;
    testptr(index, BytesPerWord - 1);
    jcc(Assembler::zero, L);
    stop("index is not a multiple of BytesPerWord");
    bind(L);
  }
#endif
  xorptr(t1, t1);    // use _zero reg to clear memory (shorter code)
  if (UseIncDec) {
    shrptr(index, 3);  // divide by 8/16 and set carry flag if bit 2 was set
  } else {
    shrptr(index, 2);  // use 2 instructions to avoid partial flag stall
    shrptr(index, 1);
  }
#ifndef _LP64
  // index could have been not a multiple of 8 (i.e., bit 2 was set)
  { Label even;
    // note: if index was a multiple of 8, then it cannot
    //       be 0 now otherwise it must have been 0 before
    //       => if it is even, we don't need to check for 0 again
    jcc(Assembler::carryClear, even);
    // clear topmost word (no jump needed if conditional assignment would work here)
    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
    // index could be 0 now, need to check again
    jcc(Assembler::zero, done);
    bind(even);
  }
#endif // !_LP64
  // initialize remaining object fields: index is a multiple of 2 now
  { Label loop;
    bind(loop);
    movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
    NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
    decrement(index);
    jcc(Assembler::notZero, loop);
  }
  // done
  bind(done);
}
Code example #9
void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) {
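    // Box one object argument for a native call: pass the address of the
    // value slot, or NULL when the slot itself holds NULL.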
    __ leal(temp(), Address(from(), from_offset * wordSize));
    __ cmpl(Address(from(), from_offset * wordSize), 0); // do not use temp() to avoid AGI
    Label L;
    __ jcc(Assembler::notZero, L);
    __ movl(temp(), 0);
    __ bind(L);
    __ movl(Address(to(), to_offset * wordSize), temp());
}
Code example #10
void check_cycles_int(cpu_options *opts, uint32_t address)
{
	code_info *code = &opts->code;
	cmp_rr(code, opts->cycles, opts->limit, SZ_D);
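	// Emit a 2-byte jcc with a placeholder 8-bit displacement; jmp_off points
	// at the displacement byte, which is patched below so the taken branch
	// skips the slow-path call.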
	code_ptr jmp_off = code->cur+1;
	jcc(code, CC_A, jmp_off+1);
	mov_ir(code, address, opts->scratch1, SZ_D);
	call(code, opts->handle_cycle_limit_int);
	*jmp_off = code->cur - (jmp_off+1);
}
Code example #11
void check_cycles(cpu_options * opts)
{
	code_info *code = &opts->code;
	cmp_rr(code, opts->cycles, opts->limit, SZ_D);
	check_alloc_code(code, MAX_INST_LEN*2);
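	// Ensure the code buffer has room for the instructions emitted below
	// before taking pointers into it.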
	code_ptr jmp_off = code->cur+1;
	jcc(code, CC_A, jmp_off+1);
	call(code, opts->handle_cycle_limit);
	*jmp_off = code->cur - (jmp_off+1);
}
Code example #12
// preserves obj, destroys len_in_bytes
void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int hdr_size_in_bytes, Register t1) {
    assert(hdr_size_in_bytes >= 0, "header size must be positive or 0");
    Label done;

    // len_in_bytes is positive and ptr sized
    subptr(len_in_bytes, hdr_size_in_bytes);
    jcc(Assembler::zero, done);
    zero_memory(obj, len_in_bytes, hdr_size_in_bytes, t1);
    bind(done);
}
Code example #13
void InterpreterStubs::generate_interpreter_rethrow_exception() {
  comment_section("Interpreter rethrow exception");
  comment("Register eax holds the exception; Interpreter state is not in registers");

  entry("interpreter_rethrow_exception");
  comment("Restore bytecode and locals pointers");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

  comment("Mark the bytecode pointer as being inside an exception");
  addl(esi, Constant(JavaFrame::exception_frame_flag));

  comment("Clear the expression stack");
  movl(esp, Address(ebp, Constant(JavaFrame::stack_bottom_pointer_offset())));
  
  comment("Push the exception on the expression stack");
  push_obj(eax);

  comment("Get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  
  comment("Check if we got a bci - otherwise unwind the activation");
  cmpl(eax, Constant(-1));
  jcc(equal, Constant("interpreter_unwind_activation"));
#if ENABLE_JAVA_DEBUGGER
  Label skip;
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(equal, Constant(skip));
  movl(edx, eax);
  interpreter_call_vm(Constant("handle_caught_exception"), T_VOID);
  comment("Re-get exception handler bci for exception");
  interpreter_call_vm(Constant("exception_handler_bci_for_exception"), T_INT);
  bind(skip);
#endif
  comment("Convert the bytecode index into a bytecode pointer");
  movl(ecx, Address(ebp, Constant(JavaFrame::method_offset())));
  leal(esi, Address(ecx, eax, times_1, Constant(Method::base_offset())));

  // Dispatch to the exception handler.
  dispatch_next();

  entry_end(); // interpreter_rethrow_exception
}
Code example #14
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.updateBytes(int crc, byte[] b, int off, int len)
 *   int java.util.zip.CRC32.updateByteBuffer(int crc, long buf, int off, int len)
 */
address TemplateInterpreterGenerator::generate_CRC32_updateBytes_entry(AbstractInterpreter::MethodKind kind) {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP; must be preserved for the slow path, set SP to it on the fast path

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = c_rarg0;  // crc
    const Register buf = c_rarg1;  // source java byte array address
    const Register len = c_rarg2;  // length
    const Register off = len;      // offset (never overlaps with 'len')

    // Arguments are reversed on java expression stack
    // Calculate address of start element
    if (kind == Interpreter::java_util_zip_CRC32_updateByteBuffer) {
      __ movptr(buf, Address(rsp, 3*wordSize)); // long buf
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc,   Address(rsp, 5*wordSize)); // Initial CRC
    } else {
      __ movptr(buf, Address(rsp, 3*wordSize)); // byte[] array
      __ addptr(buf, arrayOopDesc::base_offset_in_bytes(T_BYTE)); // + header size
      __ movl2ptr(off, Address(rsp, 2*wordSize)); // offset
      __ addq(buf, off); // + offset
      __ movl(crc,   Address(rsp, 4*wordSize)); // Initial CRC
    }
    // Can now load 'len' since we're finished with 'off'
    __ movl(len, Address(rsp, wordSize)); // Length

    __ super_call_VM_leaf(CAST_FROM_FN_PTR(address, StubRoutines::updateBytesCRC32()), crc, buf, len);
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
Code example #15
void G1PostBarrierStub::emit_code(LIR_Assembler* ce) {
  __ bind(_entry);
  assert(addr()->is_register(), "Precondition.");
  assert(new_val()->is_register(), "Precondition.");
  Register new_val_reg = new_val()->as_register();
  __ cmpptr(new_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(addr()->as_pointer_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_post_barrier_slow_id)));
  __ jmp(_continuation);
}
Code example #16
void InterpreterStubs::generate_interpreter_call_vm_redo() {
#if ENABLE_JAVA_DEBUGGER
  Label check_breakpoint, no_breakpoint;
#endif
  comment_section("Interpreter call VM - and repeat current bytecode upon termination");
  entry("interpreter_call_vm_redo");

  comment("Save bytecode pointer");
  movl(Address(ebp, Constant(JavaFrame::bcp_store_offset())), esi);

  comment("Call the shared call vm and disregard any return value");
  call_shared_call_vm(T_VOID);
 
  comment("Restore bytecode pointer");
  movl(esi, Address(ebp, Constant(JavaFrame::bcp_store_offset())));

  comment("Restore locals pointer");
  movl(edi, Address(ebp, Constant(JavaFrame::locals_pointer_offset())));

#if ENABLE_JAVA_DEBUGGER
  comment("Check to see if we are connected to a debugger");
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(not_zero, Constant(check_breakpoint));
  bind(no_breakpoint);
  comment("Not debugging, so just dispatch");
  dispatch_next(0);
  bind(check_breakpoint);
  comment("We are debugging, so let's see if we just replaced a breakpoint opcode");
  cmpb(Address(esi), Constant(Bytecodes::_breakpoint));
  jcc(not_zero, Constant(no_breakpoint));
  comment("There is a breakpoint in the code, so that means that eax has the correct opcode");
  comment("So just jmp directly without using esi");
  andl(eax, Constant(0xFF));
  movl(ebx, eax);
  jmp(Address(no_reg, ebx, times_4, Constant("interpreter_dispatch_table")));
#else
  dispatch_next(0);
#endif
  entry_end(); // interpreter_call_vm_redo
}
Code example #17
File: jamcoin.cpp Project: dubwub/C-practice
int main()
{
  JamCoinCreator jcc(50,16);
  std::cout << "Case #1:" << std::endl;
  for (size_t i = 0; i < jcc.jamcoins.size(); i++) {
    std::cout << jcc.jamcoins[i].value;
    for (size_t j = 0; j < jcc.jamcoins[i].proofs.size(); j++) {
      std::cout << " " << jcc.jamcoins[i].proofs[j];
    }
    std::cout << std::endl;
  }
  //delete[] jcc.composites;
}
Code example #18
void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flush_icache_stub) {
  StubCodeMark mark(this, "ICache", "flush_icache_stub");

  address start = __ pc();
#ifdef AMD64

  const Register addr  = c_rarg0;
  const Register lines = c_rarg1;
  const Register magic = c_rarg2;

  Label flush_line, done;

  __ testl(lines, lines);
  __ jcc(Assembler::zero, done);

  // Force ordering wrt cflush.
  // Other fence and sync instructions won't do the job.
  __ mfence();

  __ bind(flush_line);
  __ clflush(Address(addr, 0));
  __ addptr(addr, ICache::line_size);
  __ decrementl(lines);
  __ jcc(Assembler::notZero, flush_line);

  __ mfence();

  __ bind(done);

#else
  const Address magic(rsp, 3*wordSize);
  __ lock(); __ addl(Address(rsp, 0), 0);
#endif // AMD64
  __ movptr(rax, magic); // Handshake with caller to make sure it happened!
  __ ret(0);

  // Must be set here so StubCodeMark destructor can call the flush stub.
  *flush_icache_stub = (ICache::flush_icache_stub_t)start;
}
Code example #19
File: interp_masm_x86.cpp Project: eregon/jvmci
void InterpreterMacroAssembler::narrow(Register result) {

  // Get method->_constMethod->_result_type
  movptr(rcx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
  movptr(rcx, Address(rcx, Method::const_offset()));
  load_unsigned_byte(rcx, Address(rcx, ConstMethod::result_type_offset()));

  Label done, notBool, notByte, notChar;

  // common case first
  cmpl(rcx, T_INT);
  jcc(Assembler::equal, done);

  // mask integer result to narrower return type.
  cmpl(rcx, T_BOOLEAN);
  jcc(Assembler::notEqual, notBool);
  andl(result, 0x1);
  jmp(done);

  bind(notBool);
  cmpl(rcx, T_BYTE);
  jcc(Assembler::notEqual, notByte);
  LP64_ONLY(movsbl(result, result);)
  NOT_LP64(shll(result, 24);)      // truncate upper 24 bits
  NOT_LP64(sarl(result, 24);)      // and sign-extend byte
  jmp(done);

  bind(notByte);
  cmpl(rcx, T_CHAR);
  jcc(Assembler::notEqual, notChar);
  LP64_ONLY(movzwl(result, result);)
  NOT_LP64(andl(result, 0xFFFF);)  // truncate upper 16 bits
  jmp(done);

  bind(notChar);
  // T_SHORT is the only remaining case
  LP64_ONLY(movswl(result, result);)
  NOT_LP64(shll(result, 16);)      // truncate upper 16 bits
  NOT_LP64(sarl(result, 16);)      // and sign-extend short

  // Nothing to do for T_INT
  bind(done);
}
Code example #20
void CompilerStubs::generate_compiler_idiv_irem() {
  comment_section("Compiler integer divide and remainder");
  comment("Register eax holds the dividend, register ebx holds the divisor");

  entry("compiler_idiv_irem");
  const int min_int = 0x80000000;
  Label normal_case, special_case;

  Label throw_exception;

  testl(ebx,ebx);
  jcc(equal, Constant(throw_exception));

  // Check for special case
  cmpl(eax, Constant(min_int));
  jcc(not_equal, Constant(normal_case));
  xorl(edx, edx); // Prepare edx for possible special case (where remainder = 0)
  cmpl(ebx, Constant(-1));
  jcc(equal, Constant(special_case));

  // Handle normal case
  bind(normal_case);
  cdql();
  idivl(ebx);

  // Normal and special case exit
  bind(special_case);
  ret();

  bind(throw_exception);
  comment("Throw a DivisionByZeroException");
  leal(eax, Address(Constant("division_by_zero_exception")));
  goto_shared_call_vm(T_VOID);

  entry_end(); // compiler_idiv_irem
}
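The special case above exists because idivl raises a divide error not only on a zero divisor but also when the quotient overflows, and INT_MIN / -1 is the one overflowing input; Java defines that division to wrap instead. Below is a standalone C++ illustration of the guarded semantics (illustrative code, not part of the stub; the zero-divisor check, which the stub turns into a DivisionByZeroException, is left out):

#include <climits>
#include <cstdio>

// Divide with Java semantics: INT_MIN / -1 yields (INT_MIN, 0) instead of trapping.
void idiv_irem(int dividend, int divisor, int* quot, int* rem) {
    if (dividend == INT_MIN && divisor == -1) {  // the "special_case" exit
        *quot = INT_MIN;  // eax already holds INT_MIN
        *rem  = 0;        // edx was zeroed before the divisor compare
    } else {              // the "normal_case": cdql + idivl
        *quot = dividend / divisor;
        *rem  = dividend % divisor;
    }
}

int main() {
    int q, r;
    idiv_irem(INT_MIN, -1, &q, &r);
    std::printf("%d rem %d\n", q, r);  // prints -2147483648 rem 0
    return 0;
}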
Code example #21
/**
 * Method entry for static native methods:
 *   int java.util.zip.CRC32.update(int crc, int b)
 */
address TemplateInterpreterGenerator::generate_CRC32_update_entry() {
  if (UseCRC32Intrinsics) {
    address entry = __ pc();

    // rbx: Method*
    // r13: senderSP; must be preserved for the slow path, set SP to it on the fast path
    // c_rarg0: scratch (rdi on non-Win64, rcx on Win64)
    // c_rarg1: scratch (rsi on non-Win64, rdx on Win64)

    Label slow_path;
    // If we need a safepoint check, generate full interpreter entry.
    ExternalAddress state(SafepointSynchronize::address_of_state());
    __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
             SafepointSynchronize::_not_synchronized);
    __ jcc(Assembler::notEqual, slow_path);

    // We don't generate local frame and don't align stack because
    // we call stub code and there is no safepoint on this path.

    // Load parameters
    const Register crc = rax;  // crc
    const Register val = c_rarg0;  // source java byte value
    const Register tbl = c_rarg1;  // scratch

    // Arguments are reversed on java expression stack
    __ movl(val, Address(rsp,   wordSize)); // byte value
    __ movl(crc, Address(rsp, 2*wordSize)); // Initial CRC

    __ lea(tbl, ExternalAddress(StubRoutines::crc_table_addr()));
    __ notl(crc); // ~crc
    __ update_byte_crc32(crc, val, tbl);
    __ notl(crc); // ~crc
    // result in rax

    // _areturn
    __ pop(rdi);                // get return address
    __ mov(rsp, r13);           // set sp to sender sp
    __ jmp(rdi);

    // generate a vanilla native entry as the slow path
    __ bind(slow_path);
    __ jump_to_entry(Interpreter::entry_for_kind(Interpreter::native));
    return entry;
  }
  return NULL;
}
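For reference, here is a C++ sketch of the byte-at-a-time update that update_byte_crc32 performs, assuming the standard table-driven reflected CRC-32 specified for java.util.zip.CRC32; the real helper reads precomputed entries via StubRoutines::crc_table_addr(), while this sketch computes them on the fly.

#include <cstdint>

// Table entry for the reflected CRC-32 polynomial 0xEDB88320.
static uint32_t crc_table_entry(uint8_t n) {
    uint32_t c = n;
    for (int k = 0; k < 8; k++)
        c = (c & 1) ? 0xEDB88320u ^ (c >> 1) : (c >> 1);
    return c;
}

// One-byte update. The notl instructions around the stub's helper call exist
// because java.util.zip.CRC32 keeps the complemented value between calls.
uint32_t update_byte_crc32(uint32_t crc, uint8_t val) {
    return crc_table_entry((crc ^ val) & 0xFF) ^ (crc >> 8);
}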
Code example #22
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {

  // At this point we know that marking is in progress

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);

  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);

}
Code example #23
File: methodHandles_x86.cpp Project: pombreda/graal
void MethodHandles::verify_ref_kind(MacroAssembler* _masm, int ref_kind, Register member_reg, Register temp) {
  Label L;
  BLOCK_COMMENT("verify_ref_kind {");
  __ movl(temp, Address(member_reg, NONZERO(java_lang_invoke_MemberName::flags_offset_in_bytes())));
  __ shrl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_SHIFT);
  __ andl(temp, java_lang_invoke_MemberName::MN_REFERENCE_KIND_MASK);
  __ cmpl(temp, ref_kind);
  __ jcc(Assembler::equal, L);
  { char* buf = NEW_C_HEAP_ARRAY(char, 100, mtInternal);
    jio_snprintf(buf, 100, "verify_ref_kind expected %x", ref_kind);
    if (ref_kind == JVM_REF_invokeVirtual ||
        ref_kind == JVM_REF_invokeSpecial)
      // could do this for all ref_kinds, but would explode assembly code size
      trace_method_handle(_masm, buf);
    __ STOP(buf);
  }
  BLOCK_COMMENT("} verify_ref_kind");
  __ bind(L);
}
Code example #24
void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register var_size_in_bytes, int con_size_in_bytes, Register t1, Register t2, bool is_tlab_allocated) {
    assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
           "con_size_in_bytes is not multiple of alignment");
    const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

    initialize_header(obj, klass, noreg, t1, t2);

    if (!(UseTLAB && ZeroTLAB && is_tlab_allocated)) {
        // clear rest of allocated space
        const Register t1_zero = t1;
        const Register index = t2;
        const int threshold = 6 * BytesPerWord;   // approximate break even point for code size (see comments below)
        if (var_size_in_bytes != noreg) {
            mov(index, var_size_in_bytes);
            initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
        } else if (con_size_in_bytes <= threshold) {
            // use explicit null stores
            // code size = 2 + 3*n bytes (n = number of fields to clear)
            xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
            for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
                movptr(Address(obj, i), t1_zero);
        } else if (con_size_in_bytes > hdr_size_in_bytes) {
            // use loop to null out the fields
            // code size = 16 bytes for even n (n = number of fields to clear)
            // initialize last object field first if odd number of fields
            xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
            movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
            // initialize last object field if constant size is odd
            if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
                movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
            // initialize remaining object fields: index is a multiple of 2
            {   Label loop;
                bind(loop);
                movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
                       t1_zero);
                NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
                                t1_zero);)
                decrement(index);
                jcc(Assembler::notZero, loop);
            }
        }
    }

    if (CURRENT_ENV->dtrace_alloc_probes()) {
        assert(obj == rax, "must be");
        call(RuntimeAddress(Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)));
    }

    verify_oop(obj);
}
Code example #25
File: methodHandles_x86.cpp Project: pombreda/graal
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rbx, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ movptr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ movptr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ cmpptr(recv, __ argument_address(temp2, -1));
    __ jcc(Assembler::equal, L);
    __ movptr(rax, __ argument_address(temp2, -1));
    __ STOP("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}
Code example #26
void InterpreterStubs::generate_interpreter_rethrow_exception_init() {
  comment_section("Interpreter rethrow exception init");
  comment("Register eax holds the exception; Interpreter state is not in registers");
  entry("interpreter_rethrow_exception_init");
#if ENABLE_JAVA_DEBUGGER
  Label skip;
  cmpb(Address(Constant("_debugger_active")), Constant(0));
  jcc(equal, Constant(skip));
  comment("push the exception object so we don't nuke it");
  push_obj(eax);
  comment("call debugger code to store relevant info about where exception happened");
  interpreter_call_vm(Constant("handle_exception_info"), T_VOID);
  pop_obj(eax, edx);
  bind(skip);
#endif
  if (GenerateInlineAsm)
      jmp(Constant("interpreter_rethrow_exception"));
  else
      comment("fall through to rethrow_exception"); // IMPL_NOTE: FALLTHROUGH

  entry_end(); // interpreter_rethrow_exception_init
}
Code example #27
void G1PreBarrierStub::emit_code(LIR_Assembler* ce) {
  // At this point we know that marking is in progress.
  // If do_load() is true then we have to emit the
  // load of the previous value; otherwise it has already
  // been loaded into _pre_val.

  __ bind(_entry);
  assert(pre_val()->is_register(), "Precondition.");

  Register pre_val_reg = pre_val()->as_register();

  if (do_load()) {
    ce->mem2reg(addr(), pre_val(), T_OBJECT, patch_code(), info(), false /*wide*/, false /*unaligned*/);
  }

  __ cmpptr(pre_val_reg, (int32_t) NULL_WORD);
  __ jcc(Assembler::equal, _continuation);
  ce->store_parameter(pre_val()->as_register(), 0);
  __ call(RuntimeAddress(Runtime1::entry_for(Runtime1::g1_pre_barrier_slow_id)));
  __ jmp(_continuation);

}
Code example #28
void MethodHandles::jump_from_method_handle(MacroAssembler* _masm, Register method, Register temp,
                                            bool for_compiler_entry) {
  assert(method == rbx, "interpreter calling convention");

  Label L_no_such_method;
  __ testptr(rbx, rbx);
  __ jcc(Assembler::zero, L_no_such_method);

  __ verify_method_ptr(method);

  if (!for_compiler_entry && JvmtiExport::can_post_interpreter_events()) {
    Label run_compiled_code;
    // JVMTI events, such as single-stepping, are implemented partly by avoiding running
    // compiled code in threads for which the event is enabled.  Check here for
    // interp_only_mode if these events CAN be enabled.
#ifdef _LP64
    Register rthread = r15_thread;
#else
    Register rthread = temp;
    __ get_thread(rthread);
#endif
    // interp_only is an int, on little endian it is sufficient to test the byte only
    // Is a cmpl faster?
    __ cmpb(Address(rthread, JavaThread::interp_only_mode_offset()), 0);
    __ jccb(Assembler::zero, run_compiled_code);
    __ jmp(Address(method, Method::interpreter_entry_offset()));
    __ BIND(run_compiled_code);
  }

  const ByteSize entry_offset = for_compiler_entry ? Method::from_compiled_offset() :
                                                     Method::from_interpreted_offset();
  __ jmp(Address(method, entry_offset));

  __ bind(L_no_such_method);
  __ jump(RuntimeAddress(StubRoutines::throw_AbstractMethodError_entry()));
}
Code example #29
// These stubs are used by the compiler only.
// Argument registers, which must be preserved:
//   rcx - receiver (always first argument)
//   rdx - second argument (if any)
// Other registers that might be usable:
//   rax - inline cache register (is interface for itable stub)
//   rbx - method (used when calling out to interpreter)
// Available now, but may become callee-save at some point:
//   rsi, rdi
// Note that rax and rdx are also used for return values.
//
VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
  const int i486_code_length = VtableStub::pd_code_size_limit(true);
  VtableStub* s = new(i486_code_length) VtableStub(true, vtable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), i486_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

#ifndef PRODUCT

  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif /* PRODUCT */

  // get receiver (need to skip return address on top of stack)
  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");

  // get receiver klass
  address npe_addr = __ pc();
  __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));

#ifndef PRODUCT
  if (DebugVtables) {
    Label L;
    // check offset vs vtable length
    __ cmpl(Address(rax, InstanceKlass::vtable_length_offset()*wordSize), vtable_index*vtableEntry::size());
    __ jcc(Assembler::greater, L);
    __ movl(rbx, vtable_index);
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, bad_compiled_vtable_index), rcx, rbx);
    __ bind(L);
  }
#endif // PRODUCT

  const Register method = rbx;

  // load Method* and target address
  __ lookup_virtual_method(rax, vtable_index, method);

  if (DebugVtables) {
    Label L;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L);
    __ stop("Vtable entry is NULL");
    __ bind(L);
  }

  // rax: receiver klass
  // method (rbx): Method*
  // rcx: receiver
  address ame_addr = __ pc();
  __ jmp( Address(method, Method::from_compiled_offset()));

  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("vtable #%d at "PTR_FORMAT"[%d] left over: %d",
                  vtable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(vtable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}
Code example #30
VtableStub* VtableStubs::create_itable_stub(int itable_index) {
  // Note well: pd_code_size_limit is the absolute minimum we can get away with.  If you
  //            add code here, bump the code stub size returned by pd_code_size_limit!
  const int i486_code_length = VtableStub::pd_code_size_limit(false);
  VtableStub* s = new(i486_code_length) VtableStub(false, itable_index);
  // Can be NULL if there is no free space in the code cache.
  if (s == NULL) {
    return NULL;
  }

  ResourceMark rm;
  CodeBuffer cb(s->entry_point(), i486_code_length);
  MacroAssembler* masm = new MacroAssembler(&cb);

  // Entry arguments:
  //  rax: Interface
  //  rcx: Receiver

#ifndef PRODUCT
  if (CountCompiledCalls) {
    __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
  }
#endif /* PRODUCT */
  // get receiver (need to skip return address on top of stack)

  assert(VtableStub::receiver_location() == rcx->as_VMReg(), "receiver expected in rcx");

  // get receiver klass (also an implicit null-check)
  address npe_addr = __ pc();
  __ movptr(rsi, Address(rcx, oopDesc::klass_offset_in_bytes()));

  // Most registers are in use; we'll use rax, rbx, rsi, rdi
  // (If we need to make rsi, rdi callee-save, do a push/pop here.)
  const Register method = rbx;
  Label throw_icce;

  // Get Method* and entrypoint for compiler
  __ lookup_interface_method(// inputs: rec. class, interface, itable index
                             rsi, rax, itable_index,
                             // outputs: method, scan temp. reg
                             method, rdi,
                             throw_icce);

  // method (rbx): Method*
  // rcx: receiver

#ifdef ASSERT
  if (DebugVtables) {
    Label L1;
    __ cmpptr(method, (int32_t)NULL_WORD);
    __ jcc(Assembler::equal, L1);
    __ cmpptr(Address(method, Method::from_compiled_offset()), (int32_t)NULL_WORD);
    __ jcc(Assembler::notZero, L1);
    __ stop("Method* is null");
    __ bind(L1);
  }
#endif // ASSERT

  address ame_addr = __ pc();
  __ jmp(Address(method, Method::from_compiled_offset()));

  __ bind(throw_icce);
  __ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
  masm->flush();

  if (PrintMiscellaneous && (WizardMode || Verbose)) {
    tty->print_cr("itable #%d at "PTR_FORMAT"[%d] left over: %d",
                  itable_index, s->entry_point(),
                  (int)(s->code_end() - s->entry_point()),
                  (int)(s->code_end() - __ pc()));
  }
  guarantee(__ pc() <= s->code_end(), "overflowed buffer");
  // shut the door on sizing bugs
  int slop = 3;  // 32-bit offset is this much larger than an 8-bit one
  assert(itable_index > 10 || __ pc() + slop <= s->code_end(), "room for 32-bit offset");

  s->set_exception_points(npe_addr, ame_addr);
  return s;
}