#include <sched.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <unistd.h>

int ringbuf_push(const char *value, size_t len) {
    static pid_t pid = 0;

    /* Reject payloads larger than one slot. */
    if (unlikely(len > shm->size)) {
        return -1;
    }

    if (unlikely(pid == 0)) {
        pid = getpid();  /* cache our pid for the shared mutex */
    }

    shmtx_lock(&mutex_w, pid);

    uint32_t index_w = shm->index_w;
    uint32_t next = Next_Slot(index_w, shm->max);

    /* Full when the write index would catch up with the read index
     * (one slot is always left empty to distinguish full from empty). */
    if (next == shm->index_r) {
        shmtx_unlock(&mutex_w, pid);
        return Cache_Full;
    }

    memcpy(shm->datas + index_w * shm->size, value, len);

    /* Publish the new write index only after the data is in place. */
    atomic_cmp_set(&shm->index_w, index_w, next);

    if (!shmtx_unlock(&mutex_w, pid)) {
#ifdef RB_DEBUG
        printf("push lock exception! %lu\n", (unsigned long) *mutex_w.lock);
#endif
    }

    return 0;
}
int ringbuf_pop(char *value) {
    static pid_t pid = 0;

    if (unlikely(pid == 0)) {
        pid = getpid();  /* cache our pid for the shared mutex */
    }

    shmtx_lock(&mutex_r, pid);

    uint32_t index_r = shm->index_r;

    /* Empty when the read index has caught up with the write index. */
    if (shm->index_w == index_r) {
        shmtx_unlock(&mutex_r, pid);
        return No_Data;
    }

    uint32_t next = Next_Slot(index_r, shm->max);

    /* Always copies a full slot: the caller's buffer must hold
     * at least shm->size bytes. */
    memcpy(value, shm->datas + index_r * shm->size, shm->size);

    /* Release the slot by publishing the new read index. */
    atomic_cmp_set(&shm->index_r, index_r, next);

    if (!shmtx_unlock(&mutex_r, pid)) {
#ifdef RB_DEBUG
        printf("pop lock exception! %lu\n", (unsigned long) *mutex_r.lock);
#endif
    }

    return 0;
}
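/*
 * The push/pop paths above rely on names that are not defined in this
 * listing. What follows is an assumed sketch, not the project's actual
 * code: unlikely() as the usual branch-prediction hint, Next_Slot as a
 * modular increment, atomic_cmp_set mapped to the GCC/Clang builtin,
 * and one plausible layout for the shared header (field names are taken
 * from the code above; ordering and widths are guesses). In a real
 * translation unit these would precede the functions above.
 */

#define unlikely(x)  __builtin_expect(!!(x), 0)

/* Advance a slot index, wrapping at `max` slots. */
#define Next_Slot(index, max)  (((index) + 1) % (max))

/* Set *lock to `set` iff it still equals `old`; nonzero on success. */
#define atomic_cmp_set(lock, old, set) \
    __sync_bool_compare_and_swap(lock, old, set)

typedef volatile uint32_t atomic_t;  /* assumed; may be wider in the project */

/* Hypothetical shared-memory header; `shm` points at this region. */
typedef struct {
    volatile uint32_t index_r;  /* next slot to read           */
    volatile uint32_t index_w;  /* next slot to write          */
    uint32_t          max;      /* number of slots             */
    uint32_t          size;     /* bytes per slot              */
    char              datas[];  /* max * size bytes of payload */
} ringbuf_shm_t;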
void spinlock_lock(atomic_t *locker) {
    int i, n;

    for ( ;; ) {
        /* Fast path: the lock looks free, try to take it. */
        if (*locker == 0 && atomic_cmp_set(locker, 0, 1)) {
            return;
        }

        if (sys_cpus > 1) {
            /* Exponential backoff on SMP: spin 1, 2, 4, then 8 PAUSEs
             * between retries to ease cache-line contention. */
            for (n = 1; n < 16; n <<= 1) {
                for (i = 0; i < n; i++) {
                    pause();
                }
                if (*locker == 0 && atomic_cmp_set(locker, 0, 1)) {
                    return;
                }
            }
        }

        /* Still contended: give up the time slice instead of burning CPU. */
        sched_yield();
    }
}
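/*
 * Two names in spinlock_lock are also assumed rather than shown.
 * pause() here must be the CPU spin-wait hint (x86 PAUSE), not the
 * POSIX pause(2) syscall, which would block until a signal arrives;
 * sys_cpus is presumably the detected core count, since backoff
 * spinning only pays off on SMP. A hedged sketch:
 */

#define pause()  __asm__ __volatile__("pause")

static long sys_cpus;  /* e.g. sysconf(_SC_NPROCESSORS_ONLN) at startup */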
bool spinlock_trylock(atomic_t *locker) {
    /* Single non-blocking attempt; true iff the lock was acquired. */
    return (*locker == 0 && atomic_cmp_set(locker, 0, 1));
}
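/*
 * shmtx_lock/shmtx_unlock, used by ringbuf_push and ringbuf_pop, are
 * not shown either. Since the lock call takes a pid and the unlock's
 * return value is checked, a plausible reading is an nginx-style
 * pid-owned mutex in shared memory: the lock word holds the owner's
 * pid, and unlock succeeds only while the caller still owns it. A
 * sketch under those assumptions; the struct shape is hypothetical,
 * and the globals shm, mutex_w and mutex_r are assumed to be set up by
 * an init routine (not shown) that maps the shared region.
 */

typedef struct {
    atomic_t *lock;  /* in shared memory: 0 = free, else owner's pid */
} shmtx_t;

static void shmtx_lock(shmtx_t *m, pid_t pid) {
    /* Spin until the word flips from 0 to our pid; the real code could
     * reuse spinlock_lock's backoff strategy here. */
    for ( ;; ) {
        if (*m->lock == 0 && atomic_cmp_set(m->lock, 0, pid)) {
            return;
        }
        sched_yield();
    }
}

static bool shmtx_unlock(shmtx_t *m, pid_t pid) {
    /* Returns false if we no longer own the lock, which is what the
     * RB_DEBUG branches above report as a "lock exception". */
    return atomic_cmp_set(m->lock, pid, 0);
}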