Example #1
void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
    assert_different_registers(Rmark, Roop, Rbox);

    Label done;

    Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");

    if (UseBiasedLocking) {
        // load the object out of the BasicObjectLock
        ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
        verify_oop(Roop);
        biased_locking_exit(mark_addr, Rmark, done);
    }
    // Test first if it is a fast recursive unlock
    ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
    br_null_short(Rmark, Assembler::pt, done);
    if (!UseBiasedLocking) {
        // load object
        ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
        verify_oop(Roop);
    }

    // Check if it is still a light weight lock, this is true if we see
    // the stack address of the basicLock in the markOop of the object
    cas_ptr(mark_addr.base(), Rbox, Rmark);
    cmp(Rbox, Rmark);

    brx(Assembler::notEqual, false, Assembler::pn, slow_case);
    delayed()->nop();
    // Done
    bind(done);
}
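The fast path above hinges on a single CAS: if the object's mark word still holds the stack address of the BasicLock, the lock is thin and uncontended and can be released by writing the displaced header back. A minimal C++ model of that protocol, with illustrative names (BasicLock and fast_unlock here are sketches, not HotSpot API):

#include <atomic>
#include <cstdint>

struct BasicLock { std::uintptr_t displaced_header; };

// Sketch of the lightweight unlock: a NULL displaced header means recursive
// locking (nothing to restore); otherwise CAS the saved header back into the
// mark word, expecting it to still contain our stack-lock address.
bool fast_unlock(std::atomic<std::uintptr_t>& mark_word, BasicLock* lock) {
  std::uintptr_t displaced = lock->displaced_header;
  if (displaced == 0) return true;  // recursive case, done
  std::uintptr_t expected = reinterpret_cast<std::uintptr_t>(lock);
  // On failure the lock is inflated or contended: take the slow path.
  return mark_word.compare_exchange_strong(expected, displaced);
}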
Example #2
// --- ref_cas_with_check
// Write-barriered compare of Rcmp with memory; if equal, set memory to Rval.
// Sets flags dependent on success. Returns the relpc where NPE info should be added.
// NB: on failure Rcmp contains the value from memory; this will be poisoned and
// not lvb-ed, i.e. you shouldn't use this value.
int C1_MacroAssembler::ref_cas_with_check(RInOuts, Register Rbase, Register Rcmp, Register Rtmp0, Register Rtmp1,
                                          RKeepIns, Register Rindex, int off, int scale, Register Rval,
                                          CodeEmitInfo *info) {
  assert0( Rcmp == RAX );
  Label checked, strip;

#ifdef ASSERT
  verify_oop(Rval, MacroAssembler::OopVerify_Store);
  verify_oop(Rcmp, MacroAssembler::OopVerify_Move);
  if (RefPoisoning) move8(Rtmp1,Rval); // Save Rval
#endif

  null_chk( Rval,strip  ); // NULLs are always safe to store

  pre_write_barrier_compiled(RInOuts::a, Rtmp0, Rtmp1,
                             RKeepIns::a, Rbase, off, Rindex, scale, Rval,
                             info);

#ifdef ASSERT
  if (RefPoisoning) {
    mov8  (Rtmp1,Rval);  // Save Rval again as it will have been squashed by the barrier
    always_poison(Rval); // Must store poisoned ref
  }
#endif // ASSERT
  bind(strip);
  verify_not_null_oop(Rbase, MacroAssembler::OopVerify_StoreBase);
  cvta2va(Rbase);
#ifdef ASSERT
  if (RefPoisoning) {
    poison(Rcmp);      // Compared register must also be poisoned
  }
#endif // ASSERT
  bind(checked);
  int npe_relpc = rel_pc();
#ifdef ASSERT
  // Check that the value to be cas-ed is an oop; NPE on this rather than the store.
  if (should_verify_oop(MacroAssembler::OopVerify_OverWrittenField))
    npe_relpc = verify_oop (Rbase, off, Rindex, scale, MacroAssembler::OopVerify_OverWrittenField);
#endif
  if (Rindex == noreg) locked()->cas8 (Rbase, off, Rval);
  else                 locked()->cas8 (Rbase, off, Rindex, scale, Rval);
#ifdef ASSERT
  pushf();
  if (RefPoisoning) {
    mov8    (Rval,Rtmp1); // Restore unpoisoned Rval
    unpoison(Rcmp);       // Compared register must also be unpoisoned
  }
  verify_oop(Rval, MacroAssembler::OopVerify_Move);
  verify_oop(Rcmp, MacroAssembler::OopVerify_Move);
  popf();
#endif
  return npe_relpc;
}
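The contract in the header comment (on failure, Rcmp holds the value read from memory) mirrors x86 CMPXCHG, which requires the compare value in RAX (hence the assert) and leaves the observed memory value there when the exchange fails. std::atomic exposes the same shape; a hedged sketch:

#include <atomic>
#include <cstdint>

// compare_exchange_strong writes the observed value back into 'expected' on
// failure, just as CMPXCHG updates RAX -- which is why the failed compare
// value above must not be reused (it may still be poisoned).
bool cas_ref(std::atomic<std::uintptr_t>& field,
             std::uintptr_t& expected,  // in: compare value; out: observed value on failure
             std::uintptr_t new_value) {
  return field.compare_exchange_strong(expected, new_value);
}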
Example #3
  address generate_catch_exception() {
    StubCodeMark mark(this, "StubRoutines", "catch_exception");
    const Address esp_after_call(ebp, -4 * wordSize); // same as in generate_call_stub()!
    const Address thread        (ebp,  9 * wordSize); // same as in generate_call_stub()!
    address start = __ pc();

    // get thread directly
    __ movl(ecx, thread);
#ifdef ASSERT
    // verify that threads correspond
    { Label L;
      __ get_thread(ebx);
      __ cmpl(ebx, ecx);
      __ jcc(Assembler::equal, L);
      __ stop("StubRoutines::catch_exception: threads must correspond");
      __ bind(L);
    }
#endif
    // set pending exception
    __ verify_oop(eax);
    __ movl(Address(ecx, Thread::pending_exception_offset()), eax          );
    __ movl(Address(ecx, Thread::exception_file_offset   ()), (int)__FILE__);
    __ movl(Address(ecx, Thread::exception_line_offset   ()),      __LINE__);
    // complete return to VM
    assert(StubRoutines::_call_stub_return_address != NULL, "_call_stub_return_address must have been generated before");
    __ jmp(StubRoutines::_call_stub_return_address, relocInfo::none);

    return start;
  }
Example #4
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
    const int aligned_mask = BytesPerWord -1;
    const int hdr_offset = oopDesc::mark_offset_in_bytes();
    assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
    assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
    Label done;

    if (UseBiasedLocking) {
        // load object
        movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
        biased_locking_exit(obj, hdr, done);
    }

    // load displaced header
    movptr(hdr, Address(disp_hdr, 0));
    // if the loaded hdr is NULL we had recursive locking
    testptr(hdr, hdr);
    // if we had recursive locking, we are done
    jcc(Assembler::zero, done);
    if (!UseBiasedLocking) {
        // load object
        movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    }
    verify_oop(obj);
    // test if object header is pointing to the displaced header, and if so, restore
    // the displaced header in the object - if the object header is not pointing to
    // the displaced header, get the object header instead
    if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
    cmpxchgptr(hdr, Address(obj, hdr_offset));
    // if the object header was not pointing to the displaced header,
    // we do unlocking via runtime call
    jcc(Assembler::notEqual, slow_case);
    // done
    bind(done);
}
Example #5
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int aligned_mask = BytesPerWord -1;
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  if (UseBiasedLocking) {
    // Load object.
    z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
    biased_locking_exit(obj, hdr, done);
  }

  // Load displaced header.
  z_ltg(hdr, Address(disp_hdr, (intptr_t)0));
  // If the loaded hdr is NULL we had recursive locking, and we are done.
  z_bre(done);
  if (!UseBiasedLocking) {
    // Load object.
    z_lg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
  }
  verify_oop(obj);
  // Test if object header is pointing to the displaced header, and if so, restore
  // the displaced header in the object. If the object header is not pointing to
  // the displaced header, get the object header instead.
  z_csg(disp_hdr, hdr, hdr_offset, obj);
  // If the object header was not pointing to the displaced header,
  // we do unlocking via runtime call.
  branch_optimized(Assembler::bcondNotEqual, slow_case);
  // done
  bind(done);
}
Example #6
File: memOop.cpp  Project: AdamSpitz/self
bool memOopClass::verify() {
  bool flag = verify_oop(this == Memory->errorObj);
  if (flag) {
    markOop m = mark();
    if (! oop(m)->is_mark()) {
      error1("mark of memOop 0x%lx isn't a markOop", this);
      if (! m->verify_oop())
        error1(" mark of memOop 0x%lx isn't even a legal oop", this);
      flag = false;
    }
    else if (is_objectMarked()) {
      error1("memOop 0x%lx is marked!", this);
      flag = false;
    }
    mapOop p = map()->enclosing_mapOop();
    if (! oop(p)->verify_oop()) {
      error1("map of memOop 0x%lx isn't a legal oop", this);
      flag = false;
    } else if (! p->is_map()) {
      error1("map of memOop 0x%lx isn't a mapOop", this);
      flag = false;
    } else if (map()->should_canonicalize()) {
      // put this test here so we only check maps actually used in objs--dmu
      Memory->map_table->verify_map((slotsMapDeps*)(map()));
    }
  }
  return flag;
}
Example #7
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  InstanceKlass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rscratch2;
  Register temp2 = rscratch1; // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ cbz(obj, L_bad);
  __ push(RegSet::of(temp, temp2), sp);
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ br(Assembler::EQ, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ ldr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ br(Assembler::EQ, L_ok);
  __ pop(RegSet::of(temp, temp2), sp);
  __ bind(L_bad);
  __ stop(error_message);
  __ BIND(L_ok);
  __ pop(RegSet::of(temp, temp2), sp);
  BLOCK_COMMENT("} verify_klass");
}
Example #8
void MethodHandles::verify_klass(MacroAssembler* _masm,
                                 Register obj, SystemDictionary::WKID klass_id,
                                 const char* error_message) {
  Klass** klass_addr = SystemDictionary::well_known_klass_addr(klass_id);
  KlassHandle klass = SystemDictionary::well_known_klass(klass_id);
  Register temp = rdi;
  Register temp2 = noreg;
  LP64_ONLY(temp2 = rscratch1);  // used by MacroAssembler::cmpptr
  Label L_ok, L_bad;
  BLOCK_COMMENT("verify_klass {");
  __ verify_oop(obj);
  __ testptr(obj, obj);
  __ jcc(Assembler::zero, L_bad);
  __ push(temp); if (temp2 != noreg)  __ push(temp2);
#define UNPUSH { if (temp2 != noreg)  __ pop(temp2);  __ pop(temp); }
  __ load_klass(temp, obj);
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  intptr_t super_check_offset = klass->super_check_offset();
  __ movptr(temp, Address(temp, super_check_offset));
  __ cmpptr(temp, ExternalAddress((address) klass_addr));
  __ jcc(Assembler::equal, L_ok);
  UNPUSH;
  __ bind(L_bad);
  __ STOP(error_message);
  __ BIND(L_ok);
  UNPUSH;
  BLOCK_COMMENT("} verify_klass");
}
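Both verify_klass variants perform the same two-step check: the receiver's klass either is the expected well-known klass, or stores it at super_check_offset, the slot used by HotSpot's fast subtype check. A simplified C++ model (klass_ok and the raw pointer layout are illustrative, not VM API):

#include <cstdint>

// 'klass' points at a klass record whose super-check slot lives at
// 'super_check_offset' bytes from its base (illustrative layout).
bool klass_ok(const void* klass, const void* expected,
              std::intptr_t super_check_offset) {
  if (klass == expected) return true;  // exact match
  const void* super = *reinterpret_cast<const void* const*>(
      reinterpret_cast<const char*>(klass) + super_check_offset);
  return super == expected;            // match via the subtype-check slot
}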
Example #9
void C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
  const int hdr_offset = oopDesc::mark_offset_in_bytes();
  assert_different_registers(hdr, obj, disp_hdr);
  NearLabel done;

  verify_oop(obj);

  // Load object header.
  z_lg(hdr, Address(obj, hdr_offset));

  // Save object being locked into the BasicObjectLock...
  z_stg(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));

  if (UseBiasedLocking) {
    biased_locking_enter(obj, hdr, Z_R1_scratch, Z_R0_scratch, done, &slow_case);
  }

  // and mark it as unlocked.
  z_oill(hdr, markOopDesc::unlocked_value);
  // Save unlocked object header into the displaced header location on the stack.
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Test if object header is still the same (i.e. unlocked), and if so, store the
  // displaced header address in the object header. If it is not the same, get the
  // object header instead.
  z_csg(hdr, disp_hdr, hdr_offset, obj);
  // If the object header was the same, we're done.
  if (PrintBiasedLockingStatistics) {
    Unimplemented();
#if 0
    cond_inc32(Assembler::equal,
               ExternalAddress((address)BiasedLocking::fast_path_entry_count_addr()));
#endif
  }
  branch_optimized(Assembler::bcondEqual, done);
  // If the object header was not the same, it is now in the hdr register.
  // => Test if it is a stack pointer into the same stack (recursive locking), i.e.:
  //
  // 1) (hdr & markOopDesc::lock_mask_in_place) == 0
  // 2) rsp <= hdr
  // 3) hdr <= rsp + page_size
  //
  // These 3 tests can be done by evaluating the following expression:
  //
  // (hdr - Z_SP) & (~(page_size-1) | markOopDesc::lock_mask_in_place)
  //
  // assuming both the stack pointer and page_size have their least
  // significant 2 bits cleared and page_size is a power of 2
  z_sgr(hdr, Z_SP);

  load_const_optimized(Z_R0_scratch, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
  z_ngr(hdr, Z_R0_scratch); // AND sets CC (result eq/ne 0).
  // For recursive locking, the result is zero. => Save it in the displaced header
  // location (NULL in the displaced hdr location indicates recursive locking).
  z_stg(hdr, Address(disp_hdr, (intptr_t)0));
  // Otherwise we don't care about the result and handle locking via runtime call.
  branch_optimized(Assembler::bcondNotZero, slow_case);
  // done
  bind(done);
}
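The three tests in the comment collapse into one subtract-and-mask: after hdr - SP, any set lock bit or any bit at or above the page size leaves the result nonzero. A standalone sketch of the arithmetic, assuming a 4 KiB page and a 2-bit lock mask (values are illustrative):

#include <cstdint>

// Combined recursive-lock test: zero iff hdr has clear lock bits and
// SP <= hdr < SP + page_size.
bool is_recursive_stack_lock(std::uintptr_t hdr, std::uintptr_t sp) {
  const std::uintptr_t page_size = 4096;  // assumed; the VM uses os::vm_page_size()
  const std::uintptr_t lock_mask = 0x3;   // assumed lock_mask_in_place
  return ((hdr - sp) & (~(page_size - 1) | lock_mask)) == 0;
}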
Example #10
address AbstractInterpreterGenerator::generate_result_handler_for(BasicType type) {
  //
  // Registers alive
  //   R3_RET
  //   LR
  //
  // Registers updated
  //   R3_RET
  //

  Label done;
  address entry = __ pc();

  switch (type) {
  case T_BOOLEAN:
    // convert !=0 to 1
    __ neg(R0, R3_RET);
    __ orr(R0, R3_RET, R0);
    __ srwi(R3_RET, R0, 31);
    break;
  case T_BYTE:
    // sign extend 8 bits
    __ extsb(R3_RET, R3_RET);
    break;
  case T_CHAR:
    // zero extend 16 bits
    __ clrldi(R3_RET, R3_RET, 48);
    break;
  case T_SHORT:
    // sign extend 16 bits
    __ extsh(R3_RET, R3_RET);
    break;
  case T_INT:
    // sign extend 32 bits
    __ extsw(R3_RET, R3_RET);
    break;
  case T_LONG:
    break;
  case T_OBJECT:
    // unbox result if not null
    __ cmpdi(CCR0, R3_RET, 0);
    __ beq(CCR0, done);
    __ ld(R3_RET, 0, R3_RET);
    __ verify_oop(R3_RET);
    break;
  case T_FLOAT:
    break;
  case T_DOUBLE:
    break;
  case T_VOID:
    break;
  default: ShouldNotReachHere();
  }

  __ BIND(done);
  __ blr();

  return entry;
}
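The T_BOOLEAN case is the classic branch-free normalization (x | -x) >> 31: for any nonzero 32-bit x at least one of x and -x has the sign bit set, so a logical right shift by 31 yields exactly 1, and zero stays 0. A quick C++ check of the identity:

#include <cassert>
#include <cstdint>

// Mirrors the neg/orr/srwi sequence: normalize any nonzero value to 1.
std::uint32_t normalize_bool(std::uint32_t x) {
  return (x | (0u - x)) >> 31;
}

int main() {
  assert(normalize_bool(0) == 0);
  assert(normalize_bool(1) == 1);
  assert(normalize_bool(42) == 1);
  assert(normalize_bool(0x80000000u) == 1);
  return 0;
}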
Example #11
void C1_MacroAssembler::verify_not_null_oop(Register r) {
  if (!VerifyOops) return;
  NearLabel not_null;
  compareU64_and_branch(r, (intptr_t)0, bcondNotEqual, not_null);
  stop("non-null oop required");
  bind(not_null);
  verify_oop(r);
}
Example #12
void C1_MacroAssembler::verify_not_null_oop(Register r) {
    Label not_null;
    br_notnull_short(r, Assembler::pt, not_null);
    stop("non-null oop required");
    bind(not_null);
    if (!VerifyOops) return;
    verify_oop(r);
}
Example #13
void C1_MacroAssembler::allocate_array(
  Register obj,                        // result: Pointer to array after successful allocation.
  Register len,                        // array length
  Register t1,                         // temp register
  Register t2,                         // temp register
  int      hdr_size,                   // object header size in words
  int      elt_size,                   // element size in bytes
  Register klass,                      // object klass
  Label&   slow_case                   // Continuation point if fast allocation fails.
) {
  assert_different_registers(obj, len, t1, t2, klass);

  // Determine alignment mask.
  assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");

  // Check for negative or excessive length.
  compareU64_and_branch(len, (int32_t)max_array_allocation_length, bcondHigh, slow_case);

  // Compute array size.
  // Note: If 0 <= len <= max_length, len*elt_size + header + alignment is
  // smaller or equal to the largest integer. Also, since top is always
  // aligned, we can do the alignment here instead of at the end address
  // computation.
  const Register arr_size = t2;
  switch (elt_size) {
    case  1: lgr_if_needed(arr_size, len); break;
    case  2: z_sllg(arr_size, len, 1); break;
    case  4: z_sllg(arr_size, len, 2); break;
    case  8: z_sllg(arr_size, len, 3); break;
    default: ShouldNotReachHere();
  }
  add2reg(arr_size, hdr_size * wordSize + MinObjAlignmentInBytesMask); // Add space for header & alignment.
  z_nill(arr_size, (~MinObjAlignmentInBytesMask) & 0xffff);            // Align array size.

  try_allocate(obj, arr_size, 0, t1, slow_case);

  initialize_header(obj, klass, len, noreg, t1);

  // Clear rest of allocated space.
  Label done;
  Register object_fields = t1;
  Register Rzero = Z_R1_scratch;
  z_aghi(arr_size, -(hdr_size * BytesPerWord));
  z_bre(done); // Jump if size of fields is zero.
  z_la(object_fields, hdr_size * BytesPerWord, obj);
  z_xgr(Rzero, Rzero);
  initialize_body(object_fields, arr_size, Rzero);
  bind(done);

  // Dtrace support is unimplemented.
  // if (CURRENT_ENV->dtrace_alloc_probes()) {
  //   assert(obj == rax, "must be");
  //   call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
  // }

  verify_oop(obj);
}
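The size computation is the usual shift-add-mask idiom: scale the length by the element size, add the header plus the alignment mask, then clear the low bits so the rounded-up size stays aligned. A standalone sketch, assuming 8-byte object alignment (i.e. an alignment mask of 7):

#include <cstddef>

// arr_size = align_up(len * elt_size + header), as computed above.
std::size_t array_size_in_bytes(std::size_t len, unsigned log2_elt_size,
                                std::size_t hdr_size_in_bytes) {
  const std::size_t align_mask = 8 - 1;  // assumed object alignment
  return ((len << log2_elt_size) + hdr_size_in_bytes + align_mask) & ~align_mask;
}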
Example #14
void C1_MacroAssembler::verify_not_null_oop(Register r) {
  Label not_null;
  cmpdi(CCR0, r, 0);
  bne(CCR0, not_null);
  stop("non-null oop required");
  bind(not_null);
  if (!VerifyOops) return;
  verify_oop(r);
}
Example #15
// --- ref_store_with_check
// Store an oop, taking care of SBA escape and barriers. Returns the relpc where
// NPE info should be added.
int C1_MacroAssembler::ref_store_with_check(RInOuts, Register Rbase, Register Rtmp0, Register Rtmp1,
                                            RKeepIns, Register Rindex, int off, int scale, Register Rval,
                                            CodeEmitInfo *info) {
  Label strip;

#ifdef ASSERT
  verify_oop(Rval, MacroAssembler::OopVerify_Store);
  if (RefPoisoning) move8(Rtmp1,Rval); // Save Rval
#endif

  null_chk( Rval,strip  ); // NULLs are always safe to store

  pre_write_barrier_compiled(RInOuts::a, Rtmp0, Rtmp1,
                             RKeepIns::a, Rbase, off, Rindex, scale, Rval,
                             info);
#ifdef ASSERT
  if (RefPoisoning) {
    mov8  (Rtmp1,Rval);   // Save Rval again as it will have been squashed by the barrier
    if( Rbase == Rval ) { // if base can look like value then don't poison base
      assert0( MultiMapMetaData );
      Rval = Rtmp1;
      Rtmp1 = Rbase;
    }
    always_poison(Rval); // Poison ref
  }
#endif // ASSERT
  bind(strip);
  verify_not_null_oop(Rbase, MacroAssembler::OopVerify_StoreBase);
  cvta2va(Rbase);
  int npe_relpc = rel_pc();
#ifdef ASSERT
  // Check that the value to be squashed is an oop; NPE on this rather than the store.
  if (should_verify_oop(MacroAssembler::OopVerify_OverWrittenField))
    npe_relpc = verify_oop (Rbase, off, Rindex, scale, MacroAssembler::OopVerify_OverWrittenField);
#endif
  if (Rindex == noreg) st8  (Rbase, off, Rval);
  else                 st8  (Rbase, off, Rindex, scale, Rval);
#ifdef ASSERT
  if (RefPoisoning) mov8(Rval,Rtmp1); // Restore unpoisoned Rval
#endif
  return npe_relpc;
}
Example #16
void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox);

  Label slow_int, done;

  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  if (UseBiasedLocking) {
    // Load the object out of the BasicObjectLock.
    ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
    verify_oop(Roop);
    biased_locking_exit(CCR0, Roop, R0, done);
  }
  // Test first if it is a fast recursive unlock.
  ld(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);
  cmpdi(CCR0, Rmark, 0);
  beq(CCR0, done);
  if (!UseBiasedLocking) {
    // Load object.
    ld(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);
    verify_oop(Roop);
  }

  // Check if it is still a light weight lock, this is true if we see
  // the stack address of the basicLock in the markOop of the object.
  cmpxchgd(/*flag=*/CCR0,
           /*current_value=*/R0,
           /*compare_value=*/Rbox,
           /*exchange_value=*/Rmark,
           /*where=*/Roop,
           MacroAssembler::MemBarRel,
           MacroAssembler::cmpxchgx_hint_release_lock(),
           noreg,
           &slow_int);
  b(done);
  bind(slow_int);
  b(slow_case); // far

  // Done
  bind(done);
}
Example #17
address CppInterpreterGenerator::generate_result_handler_for(BasicType type)
{
  address start = __ pc();

  switch (type) {
  case T_VOID:
    break;

  case T_BOOLEAN:
    {
      Label zero;

      __ compare (r3, 0);
      __ beq (zero);
      __ load (r3, 1);
      __ bind (zero);
    }
    break;

  case T_CHAR:
    __ andi_ (r3, r3, 0xffff);
    break;

  case T_BYTE:
    __ extsb (r3, r3);
    break;
    
  case T_SHORT:
    __ extsh (r3, r3);
    break;

  case T_INT:
#ifdef PPC64
    __ extsw (r3, r3);
#endif
    break;

  case T_LONG:
  case T_FLOAT:
  case T_DOUBLE:
    break;

  case T_OBJECT:
    __ load (r3, STATE(_oop_temp));
    __ verify_oop (r3);
    break;

  default:
    ShouldNotReachHere();
  }
  __ blr ();

  return start;
}
Example #18
void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox, Register Rscratch, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox, Rscratch);

  Label done, cas_failed, slow_int;

  // The following move must be the first instruction emitted, since debug
  // information may be generated for it.
  // Load object header.
  ld(Rmark, oopDesc::mark_offset_in_bytes(), Roop);

  verify_oop(Roop);

  // Save object being locked into the BasicObjectLock...
  std(Roop, BasicObjectLock::obj_offset_in_bytes(), Rbox);

  if (UseBiasedLocking) {
    biased_locking_enter(CCR0, Roop, Rmark, Rscratch, R0, done, &slow_int);
  }

  // ... and mark it unlocked.
  ori(Rmark, Rmark, markOopDesc::unlocked_value);

  // Save unlocked object header into the displaced header location on the stack.
  std(Rmark, BasicLock::displaced_header_offset_in_bytes(), Rbox);

  // Compare object markOop with Rmark and if equal exchange Rscratch with object markOop.
  assert(oopDesc::mark_offset_in_bytes() == 0, "cas must take a zero displacement");
  cmpxchgd(/*flag=*/CCR0,
           /*current_value=*/Rscratch,
           /*compare_value=*/Rmark,
           /*exchange_value=*/Rbox,
           /*where=*/Roop/*+0==mark_offset_in_bytes*/,
           MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
           MacroAssembler::cmpxchgx_hint_acquire_lock(),
           noreg,
           &cas_failed,
           /*check without membar and ldarx first*/true);
  // If compare/exchange succeeded we found an unlocked object and we now have locked it
  // hence we are done.
  b(done);

  bind(slow_int);
  b(slow_case); // far

  bind(cas_failed);
  // We did not find an unlocked object so see if this is a recursive case.
  sub(Rscratch, Rscratch, R1_SP);
  load_const_optimized(R0, (~(os::vm_page_size()-1) | markOopDesc::lock_mask_in_place));
  and_(R0/*==0?*/, Rscratch, R0);
  std(R0/*==0, perhaps*/, BasicLock::displaced_header_offset_in_bytes(), Rbox);
  bne(CCR0, slow_int);

  bind(done);
}
Example #19
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);
  assert(recv != noreg, "required register");
  assert(method_temp == rmethod, "required register for loading method");

  //NOT_PRODUCT({ FlagSetting fs(TraceMethodHandles, true); trace_method_handle(_masm, "LZMH"); });

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop(method_temp, Address(recv, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes())));
  __ verify_oop(method_temp);
  __ load_heap_oop(method_temp, Address(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes())));
  __ verify_oop(method_temp);
  // the following assumes that a Method* is normally compressed in the vmtarget field:
  __ ldr(method_temp, Address(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes())));

  if (VerifyMethodHandles && !for_compiler_entry) {
    // make sure recv is already on stack
    __ ldr(temp2, Address(method_temp, Method::const_offset()));
    __ load_sized_value(temp2,
                        Address(temp2, ConstMethod::size_of_parameters_offset()),
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(Method::_size_of_parameters), "");
    Label L;
    __ ldr(rscratch1, __ argument_address(temp2, -1));
    __ cmp(recv, rscratch1);
    __ br(Assembler::EQ, L);
    __ ldr(r0, __ argument_address(temp2, -1));
    __ hlt(0);
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}
Example #20
  address generate_forward_exception() {
    StubCodeMark mark(this, "StubRoutines", "forward exception");
    address start = __ pc();

    // Upon entry, the sp points to the return address returning into Java
    // (interpreted or compiled) code; i.e., the return address becomes the
    // throwing pc.
    //
    // Arguments pushed before the runtime call are still on the stack but
    // the exception handler will reset the stack pointer -> ignore them.
    // A potential result in registers can be ignored as well.

#ifdef ASSERT
    // make sure this code is only executed if there is a pending exception
    { Label L;
      __ get_thread(ecx);
      __ cmpl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (1)");
      __ bind(L);
    }
#endif

    // compute exception handler into ebx
    __ movl(eax, Address(esp));
    __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), eax);
    __ movl(ebx, eax);

    // setup eax & edx, remove return address & clear pending exception
    __ get_thread(ecx);
    __ popl(edx);
    __ movl(eax, Address(ecx, Thread::pending_exception_offset()));
    __ movl(Address(ecx, Thread::pending_exception_offset()), (int)NULL);

#ifdef ASSERT
    // make sure exception is set
    { Label L;
      __ testl(eax, eax);
      __ jcc(Assembler::notEqual, L);
      __ stop("StubRoutines::forward exception: no pending exception (2)");
      __ bind(L);
    }
#endif

    // continue at exception handler (return address removed)
    // eax: exception
    // ebx: exception handler
    // edx: throwing pc
    __ verify_oop(eax);
    __ jmp(ebx);

    return start;
  }
Example #21
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
    Label L;
    const Register temp_reg = G3_scratch;
    // Note: needs more testing of out-of-line vs. inline slow case
    verify_oop(receiver);
    load_klass(receiver, temp_reg);
    cmp_and_brx_short(temp_reg, iCache, Assembler::equal, Assembler::pt, L);
    AddressLiteral ic_miss(SharedRuntime::get_ic_miss_stub());
    jump_to(ic_miss, temp_reg);
    delayed()->nop();
    align(CodeEntryAlignment);
    bind(L);
}
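An inline cache check is one load and one compare: fetch the receiver's klass and compare it with the klass compiled into the call site; a mismatch routes to the IC miss stub, which repatches the site. A hypothetical C++ model of the dispatch decision (names are illustrative):

typedef void (*entry_t)();

// Fast path when the cached klass matches, miss stub otherwise.
entry_t ic_dispatch(const void* receiver_klass, const void* cached_klass,
                    entry_t verified_entry, entry_t ic_miss_stub) {
  return receiver_klass == cached_klass ? verified_entry : ic_miss_stub;
}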
Example #22
address CppInterpreterGenerator::generate_stack_to_native_abi_converter(
    BasicType type)
{
  const Register stack = r5;

  address start = __ pc();

  switch (type) {
  case T_VOID:
    break;

  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
    __ load (stack, STATE(_stack));
    __ lwa (r3, Address(stack, wordSize));
    break;

  case T_LONG:
    __ load (stack, STATE(_stack));
    __ load (r3, Address(stack, wordSize));
#ifdef PPC32
    __ load (r4, Address(stack, wordSize * 2));
#endif
    break;

  case T_FLOAT:
    __ load (stack, STATE(_stack));
    __ lfs (f1, Address(stack, wordSize));
    break;

  case T_DOUBLE:
    __ load (stack, STATE(_stack));
    __ lfd (f1, Address(stack, wordSize));
    break;

  case T_OBJECT:
    __ load (stack, STATE(_stack));
    __ load (r3, Address(stack, wordSize));
    __ verify_oop (r3);
    break;
    
  default:
    ShouldNotReachHere();
  }
  __ blr ();

  return start;
}
Example #23
address CppInterpreterGenerator::generate_stack_to_stack_converter(
    BasicType type)
{
  const Register stack = r3;

  address start = __ pc();

  switch (type) {
  case T_VOID:
    break;

  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
  case T_FLOAT:
    __ load (stack, STATE(_stack));
    __ lwz (r0, Address(stack, wordSize));
    __ stw (r0, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  case T_LONG:
  case T_DOUBLE:
    __ load (stack, STATE(_stack));
    __ load (r0, Address(stack, wordSize));
    __ store (r0, Address(Rlocals, -wordSize));
#ifdef PPC32
    __ load (r0, Address(stack, wordSize * 2));
    __ store (r0, Address(Rlocals, 0));
#endif
    __ subi (Rlocals, Rlocals, wordSize * 2);
    break;

  case T_OBJECT:
    __ load (stack, STATE(_stack));
    __ load (r0, Address(stack, wordSize));
    __ verify_oop (r0);
    __ store (r0, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  default:
    ShouldNotReachHere();
  }
  __ blr ();

  return start;
}
Example #24
address CppInterpreterGenerator::generate_tosca_to_stack_converter(
    BasicType type)
{
  address start = __ pc();

  switch (type) {
  case T_VOID:
    break;

  case T_BOOLEAN:
  case T_CHAR:
  case T_BYTE:
  case T_SHORT:
  case T_INT:
    __ stw (r3, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;
    
  case T_LONG:
    __ store (r3, Address(Rlocals, -wordSize));
#ifdef PPC32
    __ store (r4, Address(Rlocals, 0));
#endif
    __ subi (Rlocals, Rlocals, wordSize * 2);
    break;

  case T_FLOAT:
    __ stfs (f1, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  case T_DOUBLE:
    __ stfd (f1, Address(Rlocals, -wordSize));
    __ subi (Rlocals, Rlocals, wordSize * 2);
    break;

  case T_OBJECT:
    __ verify_oop (r3);
    __ store (r3, Address(Rlocals, 0));
    __ subi (Rlocals, Rlocals, wordSize);
    break;

  default:
    ShouldNotReachHere();
  }
  __ blr ();

  return start;
}
Example #25
void MethodHandles::jump_to_lambda_form(MacroAssembler* _masm,
                                        Register recv, Register method_temp,
                                        Register temp2, Register temp3,
                                        bool for_compiler_entry) {
  BLOCK_COMMENT("jump_to_lambda_form {");
  // This is the initial entry point of a lazy method handle.
  // After type checking, it picks up the invoker from the LambdaForm.
  assert_different_registers(recv, method_temp, temp2);  // temp3 is only passed on
  assert(method_temp == R19_method, "required register for loading method");

  // Load the invoker, as MH -> MH.form -> LF.vmentry
  __ verify_oop(recv);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_MethodHandle::form_offset_in_bytes()), recv, temp2);
  __ verify_oop(method_temp);
  __ load_heap_oop_not_null(method_temp, NONZERO(java_lang_invoke_LambdaForm::vmentry_offset_in_bytes()), method_temp, temp2);
  __ verify_oop(method_temp);
  // The following assumes that a Method* is normally compressed in the vmtarget field:
  __ ld(method_temp, NONZERO(java_lang_invoke_MemberName::vmtarget_offset_in_bytes()), method_temp);

  if (VerifyMethodHandles && !for_compiler_entry) {
    // Make sure recv is already on stack.
    __ ld(temp2, in_bytes(Method::const_offset()), method_temp);
    __ load_sized_value(temp2, in_bytes(ConstMethod::size_of_parameters_offset()), temp2,
                        sizeof(u2), /*is_signed*/ false);
    // assert(sizeof(u2) == sizeof(ConstMethod::_size_of_parameters), "");
    Label L;
    __ ld(temp2, __ argument_offset(temp2, temp2, 0), CC_INTERP_ONLY(R17_tos) NOT_CC_INTERP(R15_esp));
    __ cmpd(CCR1, temp2, recv);
    __ beq(CCR1, L);
    __ stop("receiver not on stack");
    __ BIND(L);
  }

  jump_from_method_handle(_masm, method_temp, temp2, temp3, for_compiler_entry);
  BLOCK_COMMENT("} jump_to_lambda_form");
}
Example #26
void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox, Register Rscratch, Label& slow_case) {
    assert_different_registers(Rmark, Roop, Rbox, Rscratch);

    Label done;

    Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());

    // The following move must be the first instruction emitted, since debug
    // information may be generated for it.
    // Load object header
    ld_ptr(mark_addr, Rmark);

    verify_oop(Roop);

    // save object being locked into the BasicObjectLock
    st_ptr(Roop, Rbox, BasicObjectLock::obj_offset_in_bytes());

    if (UseBiasedLocking) {
        biased_locking_enter(Roop, Rmark, Rscratch, done, &slow_case);
    }

    // Save Rbox in Rscratch to be used for the cas operation
    mov(Rbox, Rscratch);

    // and mark it unlocked
    or3(Rmark, markOopDesc::unlocked_value, Rmark);

    // save unlocked object header into the displaced header location on the stack
    st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes());

    // compare object markOop with Rmark and if equal exchange Rscratch with object markOop
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    cas_ptr(mark_addr.base(), Rmark, Rscratch);
    // if compare/exchange succeeded we found an unlocked object and we now have locked it
    // hence we are done
    cmp(Rmark, Rscratch);
    brx(Assembler::equal, false, Assembler::pt, done);
    delayed()->sub(Rscratch, SP, Rscratch);  //pull next instruction into delay slot
    // we did not find an unlocked object so see if this is a recursive case
    // sub(Rscratch, SP, Rscratch);
    assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
    andcc(Rscratch, 0xfffff003, Rscratch);
    brx(Assembler::notZero, false, Assembler::pn, slow_case);
    delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes());
    bind(done);
}
Example #27
void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
  const Register temp_reg = R12_scratch2;
  verify_oop(receiver);
  load_klass(temp_reg, receiver);
  if (TrapBasedICMissChecks) {
    trap_ic_miss_check(temp_reg, iCache);
  } else {
    Label L;
    cmpd(CCR0, temp_reg, iCache);
    beq(CCR0, L);
    //load_const_optimized(temp_reg, SharedRuntime::get_ic_miss_stub(), R0);
    calculate_address_from_global_toc(temp_reg, SharedRuntime::get_ic_miss_stub(), true, true, false);
    mtctr(temp_reg);
    bctr();
    align(32, 12);
    bind(L);
  }
}
Example #28
void C1_MacroAssembler::initialize_object(
  Register obj,                        // result: pointer to object after successful allocation
  Register klass,                      // object klass
  Register var_size_in_bytes,          // object size in bytes if unknown at compile time; invalid otherwise
  int      con_size_in_bytes,          // object size in bytes if   known at compile time
  Register t1,                         // temp register
  Register t2                          // temp register
  ) {
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  initialize_header(obj, klass, noreg, t1, t2);

#ifdef ASSERT
  {
    lwz(t1, in_bytes(Klass::layout_helper_offset()), klass);
    if (var_size_in_bytes != noreg) {
      cmpw(CCR0, t1, var_size_in_bytes);
    } else {
      cmpwi(CCR0, t1, con_size_in_bytes);
    }
    asm_assert_eq("bad size in initialize_object", 0x753);
  }
#endif

  // Initialize body.
  if (var_size_in_bytes != noreg) {
    // Use a loop.
    addi(t1, obj, hdr_size_in_bytes);                // Compute address of first element.
    addi(t2, var_size_in_bytes, -hdr_size_in_bytes); // Compute size of body.
    initialize_body(t1, t2);
  } else if (con_size_in_bytes > hdr_size_in_bytes) {
    // Use a loop.
    initialize_body(obj, t1, t2, con_size_in_bytes, hdr_size_in_bytes);
  }

  if (CURRENT_ENV->dtrace_alloc_probes()) {
    Unimplemented();
//    assert(obj == O0, "must be");
//    call(CAST_FROM_FN_PTR(address, Runtime1::entry_for(Runtime1::dtrace_object_alloc_id)),
//         relocInfo::runtime_call_type);
  }

  verify_oop(obj);
}
Example #29
void C1_MacroAssembler::initialize_object(
  Register obj,                        // result: Pointer to object after successful allocation.
  Register klass,                      // object klass
  Register var_size_in_bytes,          // Object size in bytes if unknown at compile time; invalid otherwise.
  int      con_size_in_bytes,          // Object size in bytes if   known at compile time.
  Register t1,                         // temp register
  Register t2                          // temp register
 ) {
  assert((con_size_in_bytes & MinObjAlignmentInBytesMask) == 0,
         "con_size_in_bytes is not multiple of alignment");
  assert(var_size_in_bytes == noreg, "not implemented");
  const int hdr_size_in_bytes = instanceOopDesc::header_size() * HeapWordSize;

  const Register Rzero = t2;

  z_xgr(Rzero, Rzero);
  initialize_header(obj, klass, noreg, Rzero, t1);

  // Clear rest of allocated space.
  const int threshold = 4 * BytesPerWord;
  if (con_size_in_bytes <= threshold) {
    // Use explicit null stores.
    // code size = 6*n bytes (n = number of fields to clear)
    for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
      z_stg(Rzero, Address(obj, i));
  } else {
    // Code size generated by initialize_body() is 16.
    Register object_fields = Z_R0_scratch;
    Register len_in_bytes  = Z_R1_scratch;
    z_la(object_fields, hdr_size_in_bytes, obj);
    load_const_optimized(len_in_bytes, con_size_in_bytes - hdr_size_in_bytes);
    initialize_body(object_fields, len_in_bytes, Rzero);
  }

  // Dtrace support is unimplemented.
  //  if (CURRENT_ENV->dtrace_alloc_probes()) {
  //    assert(obj == rax, "must be");
  //    call(RuntimeAddress(Runtime1::entry_for (Runtime1::dtrace_object_alloc_id)));
  //  }

  verify_oop(obj);
}
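The threshold trades code size against loop setup: at roughly 6 bytes per explicit store versus about 16 bytes for the clearing loop, unrolled stores win for bodies of up to four words. An illustrative C++ analogue of the decision (clear_fields here is a sketch, not the VM routine):

#include <cstdint>
#include <cstring>

// Small bodies: explicit per-word stores; larger bodies: bulk clear.
void clear_fields(std::uintptr_t* fields, std::size_t n_words) {
  const std::size_t threshold = 4;  // mirrors the 4 * BytesPerWord threshold above
  if (n_words <= threshold) {
    for (std::size_t i = 0; i < n_words; i++) fields[i] = 0;
  } else {
    std::memset(fields, 0, n_words * sizeof(std::uintptr_t));
  }
}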
Example #30
void C1_MacroAssembler::build_frame(FrameMap* frame_map) {
  // offset from the expected fixed sp within the method
  int sp_offset = in_bytes(frame_map->framesize_in_bytes()) - 8; // call pushed the return IP
  if( frame_map->num_callee_saves() > 0 ) {
    int callee_save_num = frame_map->num_callee_saves()-1;
    int callee_saves    = frame_map->callee_saves(); // bitmap
    for (int i=LinearScan::nof_cpu_regs-1; i>=0; i--) {
      if ((callee_saves & 1<<i) != 0) {
        int wanted_sp_offset = frame_map->address_for_callee_save(callee_save_num)._disp;
        assert0( sp_offset-8 == wanted_sp_offset );
        push((Register)i);
        sp_offset -= 8;
        callee_save_num--;
        assert0( callee_save_num >= -1 );
      }
    }
#ifdef ASSERT
    for (int i = 0; i < LinearScan::nof_xmm_regs; i++) {
      int reg = LinearScan::nof_cpu_regs+i;
      assert ((callee_saves & 1<<reg) == 0, "Unexpected callee save XMM register");
    }
#endif
  }
  if (sp_offset != 0) {
    // make sp equal expected sp for method
    if (sp_offset == 8) push  (RCX);            // push reg as smaller encoding than sub8i
    else                sub8i (RSP, sp_offset );
  }
  if( should_verify_oop(MacroAssembler::OopVerify_IncomingArgument) ) {
    int args_len = frame_map->incoming_arguments()->length();
    for(int i=0; i < args_len; i++) {
      LIR_Opr arg = frame_map->incoming_arguments()->at(i);
      if (arg->is_valid() && arg->is_oop()) {
        VOopReg::VR oop = frame_map->oopregname(arg);
        verify_oop(oop, MacroAssembler::OopVerify_IncomingArgument);
      }
    }
  }
}
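build_frame walks the callee-save bitmap from the highest CPU register number down, so each push on the descending stack lands exactly at the slot the FrameMap assigned (checked by the assert on wanted_sp_offset). A minimal sketch of that iteration order (hypothetical helper, not the VM's API):

#include <cstdint>
#include <vector>

// Registers in push order: highest set bit first, matching the descending
// sp_offset bookkeeping in build_frame above.
std::vector<int> callee_save_push_order(std::uint32_t callee_saves, int nof_cpu_regs) {
  std::vector<int> order;
  for (int i = nof_cpu_regs - 1; i >= 0; i--) {
    if (callee_saves & (1u << i)) order.push_back(i);
  }
  return order;
}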