/*
 * Create a condition variable
 */
int lthread_cond_init(char *name, struct lthread_cond **cond,
		      __rte_unused const struct lthread_condattr *attr)
{
	struct lthread_cond *c;

	if (cond == NULL)
		return POSIX_ERRNO(EINVAL);

	/* allocate a condition variable from cache */
	c = _lthread_objcache_alloc((THIS_SCHED)->cond_cache);

	if (c == NULL)
		return POSIX_ERRNO(EAGAIN);

	c->blocked = _lthread_queue_create("blocked");
	if (c->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->cond_cache, (void *)c);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(c->name, "no name", sizeof(c->name));
	else
		strncpy(c->name, name, sizeof(c->name));
	c->name[sizeof(c->name) - 1] = 0;

	c->root_sched = THIS_SCHED;

	(*cond) = c;
	DIAG_CREATE_EVENT((*cond), LT_DIAG_COND_CREATE);
	return 0;
}
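/*
 * Usage sketch (illustrative, not part of the library): allocating a
 * condition variable and checking the result. EAGAIN here means the
 * per-scheduler object cache or the blocked-queue allocation failed.
 * "ex_cond" and "example_cond_setup" are hypothetical names for this
 * sketch.
 */
static struct lthread_cond *ex_cond;

static int example_cond_setup(void)
{
	/* a NULL attr is accepted; the attribute argument is unused */
	return lthread_cond_init("example", &ex_cond, NULL);
}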
/*
 * Unlock a mutex
 */
int lthread_mutex_unlock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread *unblocked;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* fail if the calling thread does not own the mutex */
	if (m->owner != lt || m->owner == NULL) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, POSIX_ERRNO(EPERM));
		return POSIX_ERRNO(EPERM);
	}

	rte_atomic64_dec(&m->count);
	/* if there are blocked threads then make one ready */
	while (rte_atomic64_read(&m->count) > 0) {
		unblocked = _lthread_queue_remove(m->blocked);

		if (unblocked != NULL) {
			rte_atomic64_dec(&m->count);
			DIAG_EVENT(m, LT_DIAG_MUTEX_UNLOCKED, m, unblocked);
			RTE_ASSERT(unblocked->sched != NULL);
			_ready_queue_insert((struct lthread_sched *)
					    unblocked->sched, unblocked);
			break;
		}
	}
	/* release the lock */
	m->owner = NULL;
	return 0;
}
/*
 * Broadcast a condition variable
 */
int lthread_cond_broadcast(struct lthread_cond *c)
{
	struct lthread *lt;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, 0);
	do {
		/* drain the queue waking everybody */
		lt = _lthread_queue_remove(c->blocked);

		if (lt != NULL) {
			DIAG_EVENT(c, LT_DIAG_COND_BROADCAST, c, lt);
			/* wake up */
			_ready_queue_insert((struct lthread_sched *)lt->sched,
					    lt);
		}
	} while (!_lthread_queue_empty(c->blocked));
	_reschedule();
	return 0;
}
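/*
 * Sketch: lthread_cond_signal() readies at most one blocked thread,
 * whereas broadcast drains the whole blocked queue and then calls
 * _reschedule() so freshly readied threads on the same scheduler get a
 * chance to run. "shutdown_cond" is a hypothetical variable.
 */
static struct lthread_cond *shutdown_cond;

static void example_shutdown_all(void)
{
	lthread_cond_broadcast(shutdown_cond);	/* wake every waiter */
}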
/*
 * Create a key
 * this means getting a key from the pool
 */
int lthread_key_create(unsigned int *key, tls_destructor_func destructor)
{
	if (key == NULL)
		return POSIX_ERRNO(EINVAL);

	struct lthread_key *new_key;

	if (rte_ring_mc_dequeue((struct rte_ring *)key_pool,
				(void **)&new_key) == 0) {
		new_key->destructor = destructor;
		*key = (new_key - key_table);
		return 0;
	}
	return POSIX_ERRNO(EAGAIN);
}
/*
 * Signal a condition variable
 * attempt to resume any blocked thread
 */
int lthread_cond_signal(struct lthread_cond *c)
{
	struct lthread *lt;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_SIGNAL, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	lt = _lthread_queue_remove(c->blocked);

	if (lt != NULL) {
		/* okay wake up this thread */
		DIAG_EVENT(c, LT_DIAG_COND_SIGNAL, c, lt);
		_ready_queue_insert((struct lthread_sched *)lt->sched, lt);
	}
	return 0;
}
/*
 * Obtain a mutex, blocking until it is acquired
 */
int lthread_mutex_lock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* allow no recursion */
	if (m->owner == lt) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	for (;;) {
		rte_atomic64_inc(&m->count);
		do {
			if (rte_atomic64_cmpset
			    ((uint64_t *) &m->owner, 0, (uint64_t) lt)) {
				/* happy days, we got the lock */
				DIAG_EVENT(m, LT_DIAG_MUTEX_LOCK, m, 0);
				return 0;
			}
			/* spin due to race with unlock when
			 * nothing was blocked
			 */
		} while ((rte_atomic64_read(&m->count) == 1) &&
			 (m->owner == NULL));

		/* queue the current thread in the blocked queue
		 * we defer this to after we return to the scheduler
		 * to ensure that the current thread context is saved
		 * before unlock could result in it being dequeued and
		 * resumed
		 */
		DIAG_EVENT(m, LT_DIAG_MUTEX_BLOCKED, m, lt);
		lt->pending_wr_queue = m->blocked;
		/* now relinquish cpu */
		_suspend();
		/* resumed, must loop and compete for the lock again */
	}
	return 0;
}
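/*
 * Usage sketch (hypothetical names): a plain critical section. The lock
 * is non-recursive; re-locking from the owner returns EDEADLK rather
 * than deadlocking, so nested helpers must not take a mutex the caller
 * already holds.
 */
static struct lthread_mutex *counter_mutex;
static uint64_t counter;

static void example_increment(void)
{
	if (lthread_mutex_lock(counter_mutex) == 0) {
		counter++;
		lthread_mutex_unlock(counter_mutex);
	}
}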
/*
 * Destroy a condition variable
 */
int lthread_cond_destroy(struct lthread_cond *c)
{
	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	/* try to free it */
	if (_lthread_queue_destroy(c->blocked) < 0) {
		/* queue in use */
		DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, POSIX_ERRNO(EBUSY));
		return POSIX_ERRNO(EBUSY);
	}

	/* okay free it */
	_lthread_objcache_free(c->root_sched->cond_cache, c);
	DIAG_EVENT(c, LT_DIAG_COND_DESTROY, c, 0);
	return 0;
}
/*
 * Try to lock a mutex but don't block
 */
int lthread_mutex_trylock(struct lthread_mutex *m)
{
	struct lthread *lt = THIS_LTHREAD;

	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == lt) {
		/* no recursion */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EDEADLK));
		return POSIX_ERRNO(EDEADLK);
	}

	rte_atomic64_inc(&m->count);
	if (rte_atomic64_cmpset
	    ((uint64_t *) &m->owner, (uint64_t) NULL, (uint64_t) lt)) {
		/* got the lock */
		DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, 0);
		return 0;
	}

	/* failed so return busy */
	rte_atomic64_dec(&m->count);
	DIAG_EVENT(m, LT_DIAG_MUTEX_TRYLOCK, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}
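/*
 * Sketch: opportunistic locking with trylock. It never suspends the
 * caller; EBUSY means another thread holds the lock, so defer the work
 * to a later pass. "stats_mutex" is a hypothetical parameter.
 */
static void example_try_work(struct lthread_mutex *stats_mutex)
{
	if (lthread_mutex_trylock(stats_mutex) == 0) {
		/* ... brief work under the lock ... */
		lthread_mutex_unlock(stats_mutex);
	}
	/* on EBUSY or EDEADLK: skip and retry on the next pass */
}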
/*
 * Migrate the current thread to another scheduler running
 * on the specified lcore.
 */
int lthread_set_affinity(unsigned lcoreid)
{
	struct lthread *lt = THIS_LTHREAD;
	struct lthread_sched *dest_sched;

	if (unlikely(lcoreid >= LTHREAD_MAX_LCORES))
		return POSIX_ERRNO(EINVAL);

	DIAG_EVENT(lt, LT_DIAG_LTHREAD_AFFINITY, lcoreid, 0);

	dest_sched = schedcore[lcoreid];

	if (unlikely(dest_sched == NULL))
		return POSIX_ERRNO(EINVAL);

	if (likely(dest_sched != THIS_SCHED)) {
		lt->sched = dest_sched;
		lt->pending_wr_queue = dest_sched->pready;
		_affinitize();
	}
	return 0;
}
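/*
 * Sketch: migrating the calling lthread to lcore 2 (an arbitrary choice
 * for this example). When the call returns 0 the thread is already
 * running on the destination scheduler, so any per-lcore state cached
 * before the call may be stale afterwards.
 */
static void example_migrate(void *arg __rte_unused)
{
	if (lthread_set_affinity(2) != 0)
		return;	/* bad lcore id, or no scheduler on that lcore */
	/* from here on we execute on lcore 2's scheduler */
}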
/*
 * Delete a key
 */
int lthread_key_delete(unsigned int k)
{
	struct lthread_key *key;

	/* validate before indexing; key_table holds LTHREAD_MAX_KEYS
	 * entries, so k == LTHREAD_MAX_KEYS is also out of range
	 */
	if (k >= LTHREAD_MAX_KEYS)
		return POSIX_ERRNO(EINVAL);

	key = (struct lthread_key *) &key_table[k];

	key->destructor = NULL;
	rte_ring_mp_enqueue((struct rte_ring *)key_pool,
			    (void *)key);
	return 0;
}
/*
 * Wait on a condition variable
 */
int lthread_cond_wait(struct lthread_cond *c, __rte_unused uint64_t reserved)
{
	struct lthread *lt = THIS_LTHREAD;

	if (c == NULL) {
		DIAG_EVENT(c, LT_DIAG_COND_WAIT, c, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	DIAG_EVENT(c, LT_DIAG_COND_WAIT, c, 0);

	/* queue the current thread in the blocked queue
	 * this will be written when we return to the scheduler
	 * to ensure that the current thread context is saved
	 * before any signal could result in it being dequeued and
	 * resumed
	 */
	lt->pending_wr_queue = c->blocked;
	_suspend();

	/* the condition happened */
	return 0;
}
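/*
 * Sketch of a waiter/signaller pair (hypothetical "work_ready" and
 * "have_work"). Unlike pthread_cond_wait(), no mutex is passed and the
 * second argument is reserved; callers should therefore re-check their
 * predicate in a loop after waking, since another thread may have
 * consumed the condition first.
 */
static struct lthread_cond *work_ready;
static volatile int have_work;

static void example_waiter(void *arg __rte_unused)
{
	while (!have_work)
		lthread_cond_wait(work_ready, 0);
	/* predicate holds: consume the work */
}

static void example_producer(void *arg __rte_unused)
{
	have_work = 1;
	lthread_cond_signal(work_ready);
}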
/*
 * Create a mutex
 */
int lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		       __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *m;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	m = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (m == NULL)
		return POSIX_ERRNO(EAGAIN);

	m->blocked = _lthread_queue_create("blocked queue");
	if (m->blocked == NULL) {
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, m);
		return POSIX_ERRNO(EAGAIN);
	}

	if (name == NULL)
		strncpy(m->name, "no name", sizeof(m->name));
	else
		strncpy(m->name, name, sizeof(m->name));
	m->name[sizeof(m->name) - 1] = 0;

	m->root_sched = THIS_SCHED;
	m->owner = NULL;

	rte_atomic64_init(&m->count);

	DIAG_CREATE_EVENT(m, LT_DIAG_MUTEX_CREATE);
	/* success */
	(*mutex) = m;
	return 0;
}
/*
 * Set a value against a key.
 * Return an error if the key is no longer valid.
 */
int lthread_setspecific(unsigned int k, const void *data)
{
	/* k must index within the per-thread data table */
	if (k >= LTHREAD_MAX_KEYS)
		return POSIX_ERRNO(EINVAL);

	int n = THIS_LTHREAD->tls->nb_keys_inuse;
	/* discard const qualifier */
	char *p = (char *) (uintptr_t) data;

	if (data != NULL) {
		if (THIS_LTHREAD->tls->data[k] == NULL)
			THIS_LTHREAD->tls->nb_keys_inuse = n + 1;
	}

	THIS_LTHREAD->tls->data[k] = (void *) p;
	return 0;
}
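/*
 * Sketch of the key lifecycle, assuming lthread_getspecific() as the
 * read-side counterpart (part of the same API, though not shown here).
 * A NULL destructor is allowed; a non-NULL one would run at thread exit
 * for threads holding a non-NULL value. "datum" is hypothetical.
 */
static void example_tls(void)
{
	unsigned int k;
	static int datum = 42;

	if (lthread_key_create(&k, NULL) != 0)
		return;
	lthread_setspecific(k, &datum);
	int *p = lthread_getspecific(k);
	(void)p;	/* p == &datum within this lthread */
	lthread_key_delete(k);
}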
/*
 * Destroy a mutex
 */
int lthread_mutex_destroy(struct lthread_mutex *m)
{
	if ((m == NULL) || (m->blocked == NULL)) {
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EINVAL));
		return POSIX_ERRNO(EINVAL);
	}

	if (m->owner == NULL) {
		/* try to delete the blocked queue */
		if (_lthread_queue_destroy(m->blocked) < 0) {
			DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY,
				   m, POSIX_ERRNO(EBUSY));
			return POSIX_ERRNO(EBUSY);
		}

		/* free the mutex to cache */
		_lthread_objcache_free(m->root_sched->mutex_cache, m);
		DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, 0);
		return 0;
	}
	/* can't destroy it, it's still in use */
	DIAG_EVENT(m, LT_DIAG_MUTEX_DESTROY, m, POSIX_ERRNO(EBUSY));
	return POSIX_ERRNO(EBUSY);
}