// Wake this VM thread out of whatever blocking state it is currently in,
// so it can notice a pending interrupt. Returns true if a wakeup action
// was performed, false if the thread was not in any wakeable state.
bool VM::wakeup(STATE) {
  utilities::thread::SpinLock::LockGuard guard(interrupt_lock_);

  // Always flag local interrupts so the thread re-checks its state once woken.
  set_check_local_interrupts();
  Object* wait = waiting_object_.get();
  if(park_->parked_p()) {
    // Thread is parked (Thread#sleep etc.) — unpark it directly.
    park_->unpark();
    return true;
  } else if(interrupt_with_signal_) {
#ifdef RBX_WINDOWS
    // TODO: wake up the thread
#else
    // Thread asked to be interrupted via a signal (e.g. blocked in a
    // native call) — kick it with SIGVTALRM.
    pthread_kill(os_thread_, SIGVTALRM);
#endif
    // Drop interrupt_lock_ before taking other locks below.
    // NOTE(review): the LockGuard will unlock again on scope exit —
    // assumes SpinLock::unlock is safe to call twice; confirm.
    interrupt_lock_.unlock();
    // Wakeup any locks hanging around with contention
    memory()->release_contention(state);
    return true;
  } else if(!wait->nil_p()) {
    // We shouldn't hold the VM lock and the IH lock at the same time,
    // other threads can grab them and deadlock.
    InflatedHeader* ih = wait->inflated_header(state);
    interrupt_lock_.unlock();
    ih->wakeup(state, wait);
    return true;
  } else {
    Channel* chan = waiting_channel_.get();

    if(!chan->nil_p()) {
      // Thread is blocked receiving on a channel — release the lock,
      // clear contention, then send nil to unblock the receiver.
      interrupt_lock_.unlock();
      memory()->release_contention(state);
      chan->send(state, cNil);
      return true;
    } else if(custom_wakeup_) {
      // A custom wakeup callback was registered; invoke it with its data.
      interrupt_lock_.unlock();
      memory()->release_contention(state);
      (*custom_wakeup_)(custom_wakeup_data_);
      return true;
    }

    // No wakeable state found.
    return false;
  }
}
// Older variant of VM::wakeup carrying an explicit GCToken and CallFrame,
// synchronized with the SYNC/UNSYNC macros instead of a SpinLock guard.
// Wakes this thread out of its current blocking state; returns true if a
// wakeup action was performed.
bool VM::wakeup(STATE, GCToken gct, CallFrame* call_frame) {
  SYNC(state);

  // Flag local interrupts so the thread re-checks its state once woken.
  set_check_local_interrupts();
  Object* wait = waiting_object_.get();
  if(park_->parked_p()) {
    // Thread is parked — unpark it directly.
    park_->unpark();
    return true;
  } else if(vm_jit_.interrupt_with_signal_) {
#ifdef RBX_WINDOWS
    // TODO: wake up the thread
#else
    // Thread asked to be interrupted via a signal — kick it with SIGVTALRM.
    pthread_kill(os_thread_, SIGVTALRM);
#endif
    // Release the VM lock before taking other locks below.
    UNSYNC;
    // Wakeup any locks hanging around with contention
    om->release_contention(state, gct, call_frame);
    return true;
  } else if(!wait->nil_p()) {
    // We shouldn't hold the VM lock and the IH lock at the same time,
    // other threads can grab them and deadlock.
    InflatedHeader* ih = wait->inflated_header(state);
    UNSYNC;
    ih->wakeup(state, gct, call_frame, wait);
    return true;
  } else {
    Channel* chan = waiting_channel_.get();

    if(!chan->nil_p()) {
      // Thread is blocked on a channel receive — send nil to unblock it.
      UNSYNC;
      om->release_contention(state, gct, call_frame);
      chan->send(state, gct, cNil, call_frame);
      return true;
    } else if(custom_wakeup_) {
      // A custom wakeup callback was registered; invoke it with its data.
      UNSYNC;
      om->release_contention(state, gct, call_frame);
      (*custom_wakeup_)(custom_wakeup_data_);
      return true;
    }

    // No wakeable state found.
    return false;
  }
}
// Service any pending asynchronous thread events: a cross-thread raise,
// a Thread#kill request, or a debugger single-step. Returns true when an
// exception/kill was installed into thread_state_ (or the debugger yield
// failed), false when there was nothing to do.
bool VM::check_thread_raise_or_kill(STATE) {
  Exception* pending = interrupted_exception();

  if(!pending->nil_p()) {
    clear_interrupted_exception();

    // Only write the locations if there are none.
    bool needs_locations =
      pending->locations()->nil_p() || pending->locations()->size() == 0;
    if(needs_locations) {
      pending->locations(this, Location::from_call_stack(state));
    }

    thread_state_.raise_exception(pending);
    return true;
  }

  if(interrupt_by_kill()) {
    Fiber* fiber = current_fiber.get();

    if(!(fiber->nil_p() || fiber->root_p())) {
      // Still inside a non-root fiber: keep the kill pending so the root
      // fiber sees it too.
      set_check_local_interrupts();
    } else {
      clear_interrupt_by_kill();
    }

    thread_state_.raise_thread_kill();
    return true;
  }

  // If the current thread is trying to step, debugger wise, then assist!
  if(thread_step()) {
    clear_thread_step();
    if(!Helpers::yield_debugger(state, cNil)) {
      return true;
    }
  }

  return false;
}
// Mark this thread as having a pending Thread#kill and flag it to service
// its local interrupts at the next check point.
void VM::register_kill(STATE) {
  utilities::thread::SpinLock::LockGuard lock(interrupt_lock_);

  // Set the kill condition first, then publish the interrupt check so the
  // target thread observes both together.
  set_interrupt_by_kill();
  set_check_local_interrupts();
}
// Install an exception to be raised asynchronously in this thread, and flag
// it to service its local interrupts at the next check point.
void VM::register_raise(STATE, Exception* exc) {
  utilities::thread::SpinLock::LockGuard lock(interrupt_lock_);

  // Store the pending exception first, then publish the interrupt check so
  // the target thread observes both together.
  interrupted_exception_.set(exc);
  set_check_local_interrupts();
}
// Request that this thread stop at the next debugger step point.
//
// Fix: the step flag is now raised BEFORE the local-interrupt check is
// published, mirroring register_kill()/register_raise() which set their
// condition first. The original order signalled the check before
// thread_step_ was true, so a check point racing this call could consume
// the interrupt without observing the step request.
void set_thread_step() {
  vm_jit_.thread_step_ = true;
  set_check_local_interrupts();
}
// Older SYNC-macro variant: mark this thread as having a pending
// Thread#kill and flag it to service its local interrupts.
void VM::register_kill(STATE) {
  SYNC(state);
  // Set the kill condition before publishing the interrupt check.
  set_interrupt_by_kill();
  set_check_local_interrupts();
}
// Older SYNC-macro variant: install an exception to be raised
// asynchronously in this thread and flag its local-interrupt check.
void VM::register_raise(STATE, Exception* exc) {
  SYNC(state);
  // Store the pending exception before publishing the interrupt check.
  interrupted_exception_.set(exc);
  set_check_local_interrupts();
}