PRIVATE inline
bool NO_INSTRUMENT
Switch_lock::set_lock_owner(Context *o)
{
  bool have_no_locks = access_once(&o->_lock_cnt) < 1;

  if (have_no_locks)
    {
      assert_kdb (current_cpu() == o->home_cpu());
      // Claim o->_running_under_lock: spin while it is set, then
      // try to take it with a CAS.
      for (;;)
        {
          if (EXPECT_FALSE(access_once(&o->_running_under_lock)))
            continue;
          if (EXPECT_TRUE(mp_cas(&o->_running_under_lock, Mword(false),
                                 Mword(true))))
            break;
        }
    }
  else
    assert_kdb (o->_running_under_lock);

  Mem::mp_wmb();

  if (EXPECT_FALSE(!mp_cas(&_lock_owner, Mword(0), Address(o))))
    {
      // Lost the race for the lock: roll back the flag we set above.
      if (have_no_locks)
        {
          Mem::mp_wmb();
          write_now(&o->_running_under_lock, Mword(false));
        }
      return false;
    }

  return true;
}
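// A minimal standalone sketch (not the kernel code) of the two-phase
// handshake used by set_lock_owner() above, written with std::atomic:
// first claim the owner's run flag with a spin+CAS, then try to
// install the owner pointer, and roll the flag back if the second CAS
// loses the race. It omits the _lock_cnt fast path, and all names
// below are invented for illustration.
#include <atomic>

struct Ctx { std::atomic<bool> running_under_lock{false}; };

bool set_owner(std::atomic<Ctx *> &lock_owner, Ctx *o)
{
  // Phase 1: claim o->running_under_lock (retry while it is taken).
  for (bool expected = false;
       !o->running_under_lock.compare_exchange_weak(expected, true);
       expected = false)
    ;

  // Phase 2: try to become the lock owner.
  Ctx *none = nullptr;
  if (!lock_owner.compare_exchange_strong(none, o))
    {
      o->running_under_lock.store(false);   // lost the race: roll back
      return false;
    }
  return true;
}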
PUBLIC
void NO_INSTRUMENT
Switch_lock::wait_free()
{
  auto guard = lock_guard(cpu_lock);

  assert (!valid());

  // do we already hold the lock?
  if ((_lock_owner & ~1UL) == (Address)current())
    {
      clear_lock_owner();
      Context *c = current();
      c->dec_lock_cnt();
      return;
    }

  for (;;)
    {
      assert(cpu_lock.test());

      Address _owner = access_once(&_lock_owner);
      Context *owner = (Context *)(_owner & ~1UL);
      if (!owner)
        break;

      // Help the lock owner until the lock becomes free
      current()->switch_exec_helping(owner, Context::Helping,
                                     &_lock_owner, _owner);

      Proc::preemption_point();
    }
}
PUBLIC
void
Irq_sender::destroy(Kobject ***rl)
{
  auto g = lock_guard(cpu_lock);

  // Detach from the thread this IRQ is currently bound to, if any.
  auto t = access_once(&_irq_thread);
  if (t)
    free(t, rl);

  Irq::destroy(rl);
}
extern "C" int ebbos_gthread_once(__gthread_once_t *once, void (*func) (void)) { __gthread_once_t val = __sync_val_compare_and_swap(once, 0, 1); if (val == 0) { func(); __sync_synchronize(); *once = reinterpret_cast<__gthread_once_t>(2); } else { while (access_once(*once) != reinterpret_cast<__gthread_once_t>(2)) ; } return 0; }
/**
 * \pre user_vmcb must be a unique address across all address spaces
 *      (e.g., a kernel KU-mem address)
 */
PUBLIC inline
Vmcb *
Svm::kernel_vmcb(Vmcb const *user_vmcb)
{
  if (user_vmcb != _last_user_vmcb)
    {
      // Different guest VMCB than last time: invalidate all clean
      // bits so the hardware reloads the full state.
      _kernel_vmcb->control_area.clean_bits.raw = 0;
      _last_user_vmcb = user_vmcb;
    }
  else
    // Same VMCB as last run: take over the guest's clean bits.
    _kernel_vmcb->control_area.clean_bits
      = access_once(&user_vmcb->control_area.clean_bits);

  return _kernel_vmcb;
}
Switch_lock::Status NO_INSTRUMENT
Switch_lock::lock_dirty()
{
  assert(cpu_lock.test());

  if (!valid())
    return Invalid;

  // do we already hold the lock?
  if ((_lock_owner & ~1UL) == Address(current()))
    return Locked;

  do
    {
      for (;;)
        {
          Mword o = access_once(&_lock_owner);
          if (o & 1)          // lock was invalidated
            return Invalid;
          if (!o)             // lock is free, try to take it
            break;

          // Help the lock owner until the lock becomes free
          Context *c = current();
          if (c->switch_exec_helping((Context *)o, Context::Helping,
                                     &_lock_owner, o)
                == Context::Switch::Failed
              && c->home_cpu() != current_cpu())
            c->schedule();

          Proc::preemption_point();
          if (!valid())
            return Invalid;
        }
    }
  while (!set_lock_owner(current()));

  Mem::mp_wmb();

  current()->inc_lock_cnt();   // do not lose this lock if current is deleted
  return Not_locked;
}
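// Hypothetical caller sketch showing how the three Status results of
// lock_dirty() above are typically consumed; `obj`, `_lock`, and the
// error handling are invented for illustration.
switch (obj->_lock.lock_dirty())
  {
  case Switch_lock::Invalid:     // object vanished while we were helping
    return error_object_gone();
  case Switch_lock::Locked:      // we held the lock already: no extra unlock
  case Switch_lock::Not_locked:  // freshly acquired: unlock when done
    break;
  }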
Attr attribs() const
{
  typedef L4_fpage::Rights R;
  typedef Page::Type T;

  auto raw = access_once(e);

  // Read permission is implicit; bit 1 adds write, bit 2 execute.
  R r = R::UR();
  if (raw & 2) r |= R::W();
  if (raw & 4) r |= R::X();

  // Bits 5..3 encode the memory type of the mapping.
  T t;
  switch (raw & 0x38)
    {
    case (0 << 3): t = T::Uncached(); break;
    case (1 << 3): t = T::Buffered(); break;
    default:
    case (6 << 3): t = T::Normal(); break;
    }

  return Attr(r, t);
}
bool add_attribs(Page::Attr attr)
{
  typedef L4_fpage::Rights R;

  if (attr.rights & R::WX())
    {
      Unsigned64 a = 0;
      if (attr.rights & R::W()) a = 2;
      if (attr.rights & R::X()) a |= 4;

      auto p = access_once(e);
      auto o = p;
      p |= a;
      // Write the entry back only if any bit actually changed.
      if (o != p)
        {
          write_now(e, p);
          return true;   // entry changed: caller must update the TLB
        }
    }
  return false;
}
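// A standalone sketch (not Fiasco code) of the same read-once /
// write-only-if-changed idiom with std::atomic, where the relaxed
// load and store play the roles of access_once() and write_now().
// Like the original, this is not an atomic read-modify-write; it
// assumes concurrent writers are serialized externally (e.g., by a
// page-table lock).
#include <atomic>
#include <cstdint>

bool add_bits(std::atomic<std::uint64_t> &e, std::uint64_t bits)
{
  std::uint64_t p = e.load(std::memory_order_relaxed);   // access_once
  std::uint64_t o = p;
  p |= bits;
  if (o == p)
    return false;                // bits already set: skip the store
  e.store(p, std::memory_order_relaxed);                 // write_now
  return true;                   // entry changed: flush needed
}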