static int omap_suspend(struct rproc *rproc, bool force)
{
	struct omap_rproc_priv *rpp = rproc->priv;

	/*
	 * Suspend only when the remote processor is idle; a forced
	 * suspend skips the _may_suspend() readiness check.
	 */
	if (rpp->idle && (force || _may_suspend(rpp)))
		return _suspend(rpp);

	return -EBUSY;
}
/*
 * Try to obtain a mutex
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
				(m->owner == NULL));

		/* queue the current thread in the blocked queue
		 * we defer this to after we return to the scheduler
		 * to ensure that the current thread context is saved
		 * before unlock could result in it being dequeued and
		 * resumed
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
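/*
 * Illustrative usage (a minimal sketch, not part of the sources above):
 * a critical section guarded by an lthread mutex, run from inside an
 * lthread. It assumes the DPDK performance-thread API declared in
 * lthread_api.h; the lthread_mutex_init() signature shown here, the
 * counter and the mutex name are assumptions for illustration only.
 */
#include <stdint.h>

#include "lthread_api.h"

static struct lthread_mutex *counter_mutex;
static uint64_t shared_counter;

static void counter_worker(void *arg)
{
	(void)arg;

	/* lthread_mutex_lock() either takes the lock immediately or
	 * suspends this lthread until lthread_mutex_unlock() wakes it
	 */
	if (lthread_mutex_lock(counter_mutex) == 0) {
		shared_counter++;
		lthread_mutex_unlock(counter_mutex);
	}
}

static void counter_init(void)
{
	/* assumed signature: name, output pointer, optional attributes */
	lthread_mutex_init("counter_mutex", &counter_mutex, NULL);
}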
/*
 * Wait on a condition variable
 */
int lthread_cond_wait(struct lthread_cond *c, __rte_unused uint64_t reserved)
{
	struct lthread *lt = THIS_LTHREAD;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_WAIT, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	DIAG_EVENT(c, LT_DIAG_COND_WAIT, c, 0);

	/* queue the current thread in the blocked queue
	 * this will be written when we return to the scheduler
	 * to ensure that the current thread context is saved
	 * before any signal could result in it being dequeued and
	 * resumed
	 */
	lt->pending_wr_queue = c->blocked;
	_suspend();

	/* the condition happened */
	return 0;
}
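/*
 * Illustrative wait/signal pairing (a minimal sketch, not part of the
 * sources above). Unlike pthread_cond_wait(), lthread_cond_wait() takes
 * no mutex and its second argument is reserved, so the waiter re-checks
 * its predicate in a loop after every wakeup. The condition variable
 * name, the "ready" flag and the lthread_cond_init() signature shown
 * here are assumptions for illustration only.
 */
#include "lthread_api.h"

static struct lthread_cond *ready_cond;
static volatile int ready;

static void waiter(void *arg)
{
	(void)arg;

	/* loop: a wakeup only means the predicate may have changed */
	while (!ready)
		lthread_cond_wait(ready_cond, 0);
}

static void notifier(void *arg)
{
	(void)arg;

	ready = 1;
	/* wakes one blocked lthread; lthread_cond_broadcast() wakes all */
	lthread_cond_signal(ready_cond);
}

static void cond_init(void)
{
	/* assumed signature: name, output pointer, optional attributes */
	lthread_cond_init("ready_cond", &ready_cond, NULL);
}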