/*
 * Create a condition variable
 *
 * Allocates the object from the current scheduler's cond cache and
 * creates its queue of blocked lthreads. On success *cond is set and
 * 0 is returned; on failure a positive POSIX errno value is returned
 * and nothing is leaked.
 */
int
lthread_cond_init(char *name, struct lthread_cond **cond,
		  __rte_unused const struct lthread_condattr *attr)
{
	struct lthread_cond *cv;
	const char *label;

	if (cond == NULL)
		return POSIX_ERRNO(EINVAL);

	/* take a condition variable from the per-scheduler cache */
	cv = _lthread_objcache_alloc((THIS_SCHED)->cond_cache);
	if (cv == NULL)
		return POSIX_ERRNO(EAGAIN);

	cv->blocked = _lthread_queue_create("blocked");
	if (cv->blocked == NULL) {
		/* return the object to its cache so nothing leaks */
		_lthread_objcache_free((THIS_SCHED)->cond_cache, (void *)cv);
		return POSIX_ERRNO(EAGAIN);
	}

	/* copy (and if necessary truncate) the name, always NUL-terminated */
	label = (name != NULL) ? name : "no name";
	strncpy(cv->name, label, sizeof(cv->name));
	cv->name[sizeof(cv->name) - 1] = 0;

	cv->root_sched = THIS_SCHED;

	(*cond) = cv;
	DIAG_CREATE_EVENT((*cond), LT_DIAG_COND_CREATE);
	return 0;
}
/*
 * Allocate data for TLS cache
 *
 * Takes a TLS descriptor from the current scheduler's tls cache and
 * attaches it to the given lthread. When the application declares
 * per-lthread variables (RTE_PER_LTHREAD macros), a data area for them
 * is allocated as well. Allocation failure is fatal (asserted), matching
 * the rest of the lthread startup path.
 */
void _lthread_tls_alloc(struct lthread *lt)
{
	struct lthread_tls *tls;

	tls = _lthread_objcache_alloc((THIS_SCHED)->tls_cache);

	LTHREAD_ASSERT(tls != NULL);

	tls->root_sched = (THIS_SCHED);
	lt->tls = tls;

	/* allocate data for TLS variables using RTE_PER_LTHREAD macros */
	if (sizeof(void *) < (uint64_t)RTE_PER_LTHREAD_SECTION_SIZE) {
		lt->per_lthread_data =
		    _lthread_objcache_alloc((THIS_SCHED)->per_lthread_cache);
		/*
		 * Be consistent with the tls allocation above: an exhausted
		 * cache would otherwise leave a NULL pointer here that gets
		 * dereferenced later with no diagnostic.
		 */
		LTHREAD_ASSERT(lt->per_lthread_data != NULL);
	}
}
/*
 * Create a mutex
 *
 * Allocates the object from the current scheduler's mutex cache and
 * creates its queue of blocked lthreads. On success *mutex is set and
 * 0 is returned; on failure a positive POSIX errno value is returned
 * and nothing is leaked.
 */
int
lthread_mutex_init(char *name, struct lthread_mutex **mutex,
		   __rte_unused const struct lthread_mutexattr *attr)
{
	struct lthread_mutex *mx;
	const char *label;

	if (mutex == NULL)
		return POSIX_ERRNO(EINVAL);

	/* take a mutex from the per-scheduler cache */
	mx = _lthread_objcache_alloc((THIS_SCHED)->mutex_cache);
	if (mx == NULL)
		return POSIX_ERRNO(EAGAIN);

	mx->blocked = _lthread_queue_create("blocked queue");
	if (mx->blocked == NULL) {
		/* give the object back to the cache so nothing leaks */
		_lthread_objcache_free((THIS_SCHED)->mutex_cache, mx);
		return POSIX_ERRNO(EAGAIN);
	}

	/* copy (and if necessary truncate) the name, always NUL-terminated */
	label = (name == NULL) ? "no name" : name;
	strncpy(mx->name, label, sizeof(mx->name));
	mx->name[sizeof(mx->name) - 1] = 0;

	mx->root_sched = THIS_SCHED;
	mx->owner = NULL;

	rte_atomic64_init(&mx->count);

	DIAG_CREATE_EVENT(mx, LT_DIAG_MUTEX_CREATE);

	/* success */
	(*mutex) = mx;
	return 0;
}