void C1_MacroAssembler::unlock_object(Register Rmark, Register Roop, Register Rbox, Label& slow_case) {
  assert_different_registers(Rmark, Roop, Rbox);

  Label done;

  Address mark_addr(Roop, oopDesc::mark_offset_in_bytes());
  assert(mark_addr.disp() == 0, "cas must take a zero displacement");

  if (UseBiasedLocking) {
    // Load the object out of the BasicObjectLock so the biased exit can
    // inspect its header; take the fast path out if the bias is revoked.
    ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
    verify_oop(Roop);
    biased_locking_exit(mark_addr, Rmark, done);
  }

  // First test whether this is a recursive fast unlock: a recursive
  // light-weight lock leaves a NULL displaced header in the BasicLock.
  ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark);
  br_null_short(Rmark, Assembler::pt, done);

  if (!UseBiasedLocking) {
    // Load the object (not yet loaded on this path).
    ld_ptr(Rbox, BasicObjectLock::obj_offset_in_bytes(), Roop);
    verify_oop(Roop);
  }

  // Check whether the object is still light-weight locked by this frame:
  // that is true iff we still see the stack address of the BasicLock in
  // the markOop of the object. If so, swap the displaced header back in.
  cas_ptr(mark_addr.base(), Rbox, Rmark);
  cmp(Rbox, Rmark);
  brx(Assembler::notEqual, false, Assembler::pn, slow_case);
  delayed()->nop();

  // Done
  bind(done);
}
/*
 * Hypervisor call: create a VM class of the given type covering
 * [ea_base, ea_base + size) under id. Returns H_Success on creation,
 * H_UNAVAIL if the id is already in use, and H_Parameter for an unknown
 * type or a failed constructor. Serialized on the per-OS po_mutex.
 */
sval h_create_vm_class(struct cpu_thread* thread, uval type, uval id, uval ea_base, uval size, uval imp_arg1, uval imp_arg2, uval imp_arg3, uval imp_arg4)
{
	/* Only imp_arg1 is consumed (linear and table classes). */
	(void)imp_arg2;
	(void)imp_arg3;
	(void)imp_arg4;

	sval rc = H_Parameter;
	struct vm_class *new_vmc = NULL;

	lock_acquire(&thread->cpu->os->po_mutex);

	/* Refuse to create a class whose id is already defined. */
	if (vmc_lookup(thread, id) != NULL) {
		rc = H_UNAVAIL;
		goto done;
	}

	switch (type) {
	case H_VM_CLASS_LINEAR:
		new_vmc = vmc_create_linear(id, ea_base, size, imp_arg1);
		break;
	case H_VM_CLASS_REFLECT:
		new_vmc = vmc_create_reflect(id, ea_base, size);
		break;
	case H_VM_CLASS_TABLE:
		new_vmc = vmc_create_table(thread, id, ea_base, size, imp_arg1);
		break;
	}

	/* Unknown type or constructor failure: fall out with H_Parameter. */
	if (new_vmc == NULL)
		goto done;

	hprintf("%s %ld ea: 0x%lx\n", __func__, id, ea_base);

	if (id < NUM_KERNEL_VMC) {
		/*
		 * Kernel-range ids live in a fixed slot table; publish via
		 * CAS and trap a lost race as a duplicate definition.
		 */
		if (!cas_ptr(&thread->cpu->os->vmc_kernel[id], NULL, new_vmc)) {
			assert(!thread->cpu->os->vmc_kernel[id],
			       "Kernel VMC id already defined\n");
		}
	} else {
		ht_insert(&thread->cpu->os->vmc_hash, &new_vmc->vmc_hash);
	}
	rc = H_Success;

done:
	lock_release(&thread->cpu->os->po_mutex);
	return rc;
}
void C1_MacroAssembler::lock_object(Register Rmark, Register Roop, Register Rbox, Register Rscratch, Label& slow_case) { assert_different_registers(Rmark, Roop, Rbox, Rscratch); Label done; Address mark_addr(Roop, oopDesc::mark_offset_in_bytes()); // The following move must be the first instruction of emitted since debug // information may be generated for it. // Load object header ld_ptr(mark_addr, Rmark); verify_oop(Roop); // save object being locked into the BasicObjectLock st_ptr(Roop, Rbox, BasicObjectLock::obj_offset_in_bytes()); if (UseBiasedLocking) { biased_locking_enter(Roop, Rmark, Rscratch, done, &slow_case); } // Save Rbox in Rscratch to be used for the cas operation mov(Rbox, Rscratch); // and mark it unlocked or3(Rmark, markOopDesc::unlocked_value, Rmark); // save unlocked object header into the displaced header location on the stack st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); // compare object markOop with Rmark and if equal exchange Rscratch with object markOop assert(mark_addr.disp() == 0, "cas must take a zero displacement"); cas_ptr(mark_addr.base(), Rmark, Rscratch); // if compare/exchange succeeded we found an unlocked object and we now have locked it // hence we are done cmp(Rmark, Rscratch); brx(Assembler::equal, false, Assembler::pt, done); delayed()->sub(Rscratch, SP, Rscratch); //pull next instruction into delay slot // we did not find an unlocked object so see if this is a recursive case // sub(Rscratch, SP, Rscratch); assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); andcc(Rscratch, 0xfffff003, Rscratch); brx(Assembler::notZero, false, Assembler::pn, slow_case); delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); bind(done); }