/**
 * Try to install context @a o as the owner of this switch lock.
 *
 * Two shared words are coordinated lock-free here:
 *  - o->_running_under_lock : per-context flag, claimed via CAS below when
 *    the context holds no locks yet;
 *  - this->_lock_owner      : the lock's owner word, claimed via CAS from 0.
 *
 * @param o  context that attempts to take ownership of this lock.
 * @return true if @a o is now recorded in _lock_owner; false if another
 *         owner won the race for _lock_owner (any _running_under_lock
 *         claim made here is rolled back before returning false).
 *
 * NOTE(review): the precise cross-CPU protocol (who clears
 * _running_under_lock in the contended case, and why a wmb rather than a
 * full barrier suffices) is not fully derivable from this chunk alone —
 * the comments below state only what the visible code does.
 */
PRIVATE inline bool NO_INSTRUMENT
Switch_lock::set_lock_owner(Context *o)
{
  // True iff o currently holds no locks at all (its lock count is < 1).
  bool have_no_locks = access_once(&o->_lock_cnt) < 1;

  if (have_no_locks)
    {
      // Taking a first lock is only done from o's home CPU (debug check).
      assert_kdb (current_cpu() == o->home_cpu());

      // Spin until we atomically flip o->_running_under_lock from false
      // to true: first re-read cheaply and retry while it is still set,
      // then attempt the CAS; a failed CAS loops back to the re-read.
      for (;;)
        {
          if (EXPECT_FALSE(access_once(&o->_running_under_lock)))
            continue;
          if (EXPECT_TRUE(mp_cas(&o->_running_under_lock, Mword(false), Mword(true))))
            break;
        }
    }
  else
    // o already holds locks, so its flag must already be set (debug check).
    assert_kdb (o->_running_under_lock);

  // Write barrier: make the _running_under_lock claim visible before the
  // _lock_owner word is published below.
  Mem::mp_wmb();

  // Publish o as owner only if the lock is currently free (owner word 0).
  if (EXPECT_FALSE(!mp_cas(&_lock_owner, Mword(0), Address(o))))
    {
      // Lost the race for the lock. If we claimed _running_under_lock
      // above, roll that claim back (barrier first, then plain store).
      if (have_no_locks)
        {
          Mem::mp_wmb();
          write_now(&o->_running_under_lock, Mword(false));
        }
      return false;
    }

  return true;
}
PRIVATE inline Receiver::Rcv_state Receiver::vcpu_async_ipc(Sender const *sender) const { if (EXPECT_FALSE(state() & Thread_ipc_mask)) return Rs_not_receiving; Vcpu_state *vcpu = vcpu_state().access(); if (EXPECT_FALSE(!vcpu_irqs_enabled(vcpu))) return Rs_not_receiving; Receiver *self = const_cast<Receiver*>(this); if (this == current()) self->spill_user_state(); if (self->vcpu_enter_kernel_mode(vcpu)) vcpu = vcpu_state().access(); LOG_TRACE("VCPU events", "vcpu", this, Vcpu_log, l->type = 1; l->state = vcpu->_saved_state; l->ip = Mword(sender); l->sp = regs()->sp(); l->space = ~0; //vcpu_user_space() ? static_cast<Task*>(vcpu_user_space())->dbg_id() : ~0; );