// Kernel panic handler: prints diagnostic state (current stack frame, the
// caller's address, and a caller-supplied printf-style message) and then
// halts the system. Does not return.
//
// |caller|   address of the call site that triggered the panic.
// |fmt|, ... printf-style description of the failure, formatted via
//            _dvprintf.
void _panic(void *caller, const char *fmt, ...) {
  // Emit the current frame pointer and a register/frame dump first, so the
  // crash context is logged before the variadic message is formatted.
  dprintf(ALWAYS, "panic (frame %p): \n", __GET_FRAME());
  dump_frame(__GET_FRAME());
  dprintf(ALWAYS, "panic (caller %p): ", caller);
  // Forward the caller's variadic arguments to the kernel vprintf variant.
  va_list ap;
  va_start(ap, fmt);
  _dvprintf(fmt, ap);
  va_end(ap);
  // Stop the machine; a panic never returns to the caller.
  halt();
}
void BrwLock::Block(bool write) { thread_t* ct = get_current_thread(); if (state_.load(fbl::memory_order_relaxed) & kBrwLockWriter) { thread_t* writer_copy = writer_.load(fbl::memory_order_relaxed); // Boost the writers priority. As we have already registered ourselves as // a waiter and we currently hold the thread_lock there is no race with // a writer performing a release as it will be forced to acquire the // thread_lock prior to deboosting itself. // The check against nullptr here is to resolve the unlikely race in // CommonWriteAcquire. if (writer_copy != nullptr) { bool unused; sched_inherit_priority(writer_copy, ct->effec_priority, &unused); } } zx_status_t ret = write ? wait_.Block(Deadline::infinite()) : wait_.BlockReadLock(Deadline::infinite()); if (unlikely(ret < ZX_OK)) { panic("BrwLock::Block: wait_queue_block returns with error %d lock %p, thr %p, sp %p\n", ret, this, ct, __GET_FRAME()); } }