int vmm_mutex_trylock(struct vmm_mutex *mut)
{
	int ret = 0;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irq(&mut->wq.lock);
	if (!mut->lock) {
		mut->lock++;
		vmm_manager_vcpu_resource_add(current_vcpu, &mut->res);
		mut->owner = current_vcpu;
		ret = 1;
	} else if (mut->owner == current_vcpu) {
		/*
		 * If the VCPU owning the lock tries to acquire it again,
		 * let it acquire the lock multiple times (recursive-mutex
		 * semantics, as in POSIX PTHREAD_MUTEX_RECURSIVE).
		 */
		mut->lock++;
		ret = 1;
	}
	vmm_spin_unlock_irq(&mut->wq.lock);

	return ret;
}
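For context, vmm_mutex_trylock() returns 1 on success and 0 when the mutex is held by another VCPU, so a caller that must not sleep can poll and back off. A minimal usage sketch follows; struct demo_device, demo_read_registers(), and the choice of VMM_EBUSY as the busy return code are illustrative assumptions, not code from the Xvisor tree:

/* Presumed Xvisor headers for the sketch below. */
#include <vmm_error.h>
#include <vmm_mutex.h>

/* Hypothetical caller: poll a device without blocking the VCPU. */
static int demo_poll_device(struct demo_device *dev)
{
	if (!vmm_mutex_trylock(&dev->lock)) {
		/* Lock held by another VCPU; caller retries later. */
		return VMM_EBUSY;
	}
	demo_read_registers(dev);	/* hypothetical helper */
	vmm_mutex_unlock(&dev->lock);

	return VMM_OK;
}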
static int mutex_lock_common(struct vmm_mutex *mut, u64 *timeout)
{
	int rc = VMM_OK;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irq(&mut->wq.lock);
	/* Sleep only while some other VCPU holds the lock; the owner
	 * re-acquiring recursively would otherwise deadlock waiting
	 * on itself, contradicting vmm_mutex_trylock() above. */
	while (mut->lock && (mut->owner != current_vcpu)) {
		rc = __vmm_waitqueue_sleep(&mut->wq, timeout);
		if (rc) {
			/* Timeout or some other failure */
			break;
		}
	}
	if (rc == VMM_OK) {
		if (!mut->lock) {
			/* First acquisition: account the mutex against the
			 * owning VCPU, mirroring vmm_mutex_trylock(). */
			vmm_manager_vcpu_resource_add(current_vcpu, &mut->res);
			mut->owner = current_vcpu;
		}
		mut->lock++;
	}
	vmm_spin_unlock_irq(&mut->wq.lock);

	return rc;
}
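mutex_lock_common() itself is static; in the usual Xvisor layout it is reached through thin public wrappers that choose between an untimed and a timed wait. A sketch of what those wrappers would look like under that assumption (signatures modelled on the upstream pattern, not copied from it):

/* Sketch: blocking lock with no timeout (timeout pointer left NULL,
 * so __vmm_waitqueue_sleep() waits indefinitely). */
int vmm_mutex_lock(struct vmm_mutex *mut)
{
	return mutex_lock_common(mut, NULL);
}

/* Sketch: timed lock; *timeout carries the remaining wait time and
 * is updated by the waitqueue sleep before returning. */
int vmm_mutex_lock_timeout(struct vmm_mutex *mut, u64 *timeout)
{
	return mutex_lock_common(mut, timeout);
}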
/*
 * Pre-fix variant of completion_wait_common(): compare with the
 * revised version below. Decrementing on the sleep status alone can
 * drive cmpl->done below zero when the wake-up races with another
 * waiter, and the context sanity checks are missing.
 */
static int completion_wait_common(struct vmm_completion *cmpl, u64 *timeout)
{
	int rc = VMM_OK;

	BUG_ON(!cmpl);

	vmm_spin_lock_irq(&cmpl->wq.lock);
	if (!cmpl->done) {
		rc = __vmm_waitqueue_sleep(&cmpl->wq, timeout);
	}
	if (rc == VMM_OK) {
		/* BUG: cmpl->done may already be zero here. */
		cmpl->done--;
	}
	vmm_spin_unlock_irq(&cmpl->wq.lock);

	return rc;
}
static int completion_wait_common(struct vmm_completion *cmpl, u64 *timeout)
{
	int rc = VMM_OK;

	BUG_ON(!cmpl);
	BUG_ON(arch_cpu_irq_disabled());
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irq(&cmpl->wq.lock);
	if (!cmpl->done) {
		rc = __vmm_waitqueue_sleep(&cmpl->wq, timeout);
	}
	/* Consume a completion only if one is actually pending, which
	 * avoids the underflow present in the variant above. */
	if (cmpl->done) {
		cmpl->done--;
	}
	vmm_spin_unlock_irq(&cmpl->wq.lock);

	return rc;
}
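To show how this wait path pairs with the signalling side: vmm_completion_wait() is expected to call completion_wait_common() with a NULL timeout, and vmm_completion_complete() to increment done and wake one sleeper, so a completion posted before the wait is not lost. The worker structure and demo_fill_buffer() helper below are hypothetical; only the vmm_completion_* calls are assumed from the Xvisor API:

#include <vmm_completion.h>	/* presumed header for the sketch */

/* Hypothetical consumer: block until the producer signals. */
static int demo_wait_for_data(struct demo_worker *w)
{
	/* Sleeps via completion_wait_common(cmpl, NULL) underneath. */
	return vmm_completion_wait(&w->data_ready);
}

/* Hypothetical producer: publish data, then wake one waiter. Since
 * complete() increments done, a consumer arriving afterwards still
 * returns immediately instead of sleeping. */
static void demo_publish_data(struct demo_worker *w)
{
	demo_fill_buffer(w);	/* hypothetical helper */
	vmm_completion_complete(&w->data_ready);
}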