void jumpTableEntry::print() { if (is_unused()) { std->print_cr("Unused {next = %d}", (int) destination()); return; } if (is_nmethod_stub()) { std->print("Nmethod stub "); Disassembler::decode(jump_inst_addr(), state_addr()); nmethod* nm = method(); if (nm) { nm->key.print(); } else { std->print_cr("{not pointing to nmethod}"); } return; } if (is_block_closure_stub()) { std->print("Block closure stub"); Disassembler::decode(jump_inst_addr(), state_addr()); nmethod* nm = block_nmethod(); if (nm) { nm->key.print(); } else { std->print_cr("{not compiled yet}"); } return; } if (is_link()) { std->print_cr("Link for:"); jumpTable::jump_entry_for_at(link(), 0)->print(); return; } fatal("unknown jump table entry"); }
void GC_locker::unlock_concurrent_gc(){ MutexLocker mu(JNICritical_lock); while (1) { jlong old_state = lock_state(); jlong new_state = clear_doing_gc(old_state); if ((old_state = Atomic::cmpxchg(new_state, state_addr(), old_state))) { // clear successful break; } // clear failed, loop around and try again. } JNICritical_lock.notify_all(); }
// Slow-path entry into a JNI critical region: taken when the fast path
// cannot simply bump the lock count because a GC is pending or in progress.
// Blocks on JNICritical_lock until it is safe to enter, then CAS-increments
// the lock count in _lock_state.
void GC_locker::jni_lock_slow() {
  // Mark this thread blocked-in-VM while it may wait on the monitor.
  ThreadBlockInVM tbinvm(JavaThread::current(),"JNICritical_lock");
  MutexLocker mu(JNICritical_lock);
  // Block entering threads if we know at least one thread is in a
  // JNI critical region and we need a GC.
  // We check that at least one thread is in a critical region before
  // blocking because blocked threads are woken up by a thread exiting
  // a JNI critical region.
  while (1) {
    // Re-read the packed state each iteration: it can change across the
    // wait() and across a failed CAS.
    jlong old_state = lock_state();
    if ( (is_jni_active(old_state) && needs_gc(old_state)) || doing_gc(old_state) ) {
      // Either a GC is pending with active critical sections, or one is in
      // progress — wait to be notified by the unlock/clear paths.
      JNICritical_lock.wait();
    } else {
      jlong new_state = increment_lock_count(old_state);
      // cmpxchg returns the prior value; the increment took iff it equals
      // the value we read above.
      if ( old_state == Atomic::cmpxchg(new_state, state_addr(), old_state) ) {
        // lock successful
        break;
      }
      // lock failed, loop around and try again.
    }
  }
}
// Record `dest` as this entry's jump target. The displacement field is
// stored pc-relative to state_addr() — the byte just past the displacement —
// mirroring how destination() reconstructs the absolute address.
void jumpTableEntry::set_destination(char* dest) {
  int pc_base = (int) state_addr();
  *destination_addr() = dest - pc_base;
}
// Return the absolute jump target, reconstructed from the pc-relative
// displacement stored by set_destination() (relative to state_addr()).
char* jumpTableEntry::destination() const {
  int pc_base = (int) state_addr();
  return *destination_addr() + pc_base;
}
// Turn this entry into a block-closure stub: a jump to the shared
// compile-block stub routine, with the displacement encoded pc-relative
// to state_addr().
void jumpTableEntry::initialize_block_closure_stub() {
  char* relative_target = StubRoutines::compile_block_entry() - (int) state_addr();
  fill_entry(jump_instruction, relative_target, block_closure_entry);
}
// Turn this entry into an nmethod stub: a jump to `dest`, with the
// displacement encoded pc-relative to state_addr().
void jumpTableEntry::initialize_nmethod_stub(char* dest) {
  char* relative_target = dest - (int) state_addr();
  fill_entry(jump_instruction, relative_target, nmethod_entry);
}
// Write the three fields of the entry: the jump opcode byte, the (already
// pc-relative) displacement, then the state tag.
// NOTE(review): the state byte is written last — presumably so a concurrent
// reader classifying the entry by its tag never sees a tagged-but-incomplete
// entry; confirm before reordering these stores.
void jumpTableEntry::fill_entry(char instr, char* dest, char state) { *jump_inst_addr() = instr; *destination_addr() = dest; *state_addr() = state; }
void GC_locker::jni_unlock_slow() { // There isn't a slow path jni_unlock with GPGC or PGC. assert0((!UseGenPauselessGC)); MutexLocker mu(JNICritical_lock); jlong old_state; jlong new_state; bool do_a_gc; bool do_a_notify; while (1) { do_a_gc = false; do_a_notify = false; old_state = lock_state(); new_state = decrement_lock_count(old_state); if ( needs_gc(new_state) && !is_jni_active(new_state) ) { do_a_notify = true; // GC will also check is_active, so this check is not // strictly needed. It's added here to make it clear that // the GC will NOT be performed if any other caller // of GC_locker::lock() still needs GC locked. if ( (!doing_gc(new_state)) && (!is_active()) ) { do_a_gc = true; new_state = set_doing_gc(new_state); } else { new_state = clear_needs_gc(new_state); } } if ((old_state = Atomic::cmpxchg(new_state, state_addr(), old_state))) { // unlocked successful break; } // unlock failed, loop around and try again. } if ( do_a_gc ) { { // Must give up the lock while at a safepoint MutexUnlocker munlock(JNICritical_lock); Universe::heap()->collect(GCCause::_gc_locker); } // Now that the lock is reaquired, unset _doing_gc and _needs_gc: while (1) { old_state = lock_state(); new_state = clear_needs_gc(clear_doing_gc(old_state)); if ((old_state = Atomic::cmpxchg(new_state, state_addr(), old_state))) { // clear successful break; } // clear failed, loop around and try again. } } if ( do_a_notify ) { JNICritical_lock.notify_all(); } }