/* Pop a single node from a multi-producer single-consumer queue.
   NOTE(review): looks like the Vyukov intrusive MPSC algorithm — single
   consumer only; q->tail is read/written without atomics, so calling this
   from more than one thread would race. Returns NULL both when the queue
   is empty AND when a concurrent push is mid-flight (ephemeral failure);
   callers are expected to retry later. */
gpr_mpscq_node *gpr_mpscq_pop(gpr_mpscq *q) {
  gpr_mpscq_node *tail = q->tail;
  /* acquire-load pairs with the producer's publication of ->next */
  gpr_mpscq_node *next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next);
  if (tail == &q->stub) {
    // indicates the list is actually (ephemerally) empty
    if (next == NULL) return NULL;
    /* skip past the stub: the first real node becomes the new tail */
    q->tail = next;
    tail = next;
    next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next);
  }
  if (next != NULL) {
    /* common case: tail has a successor, so we can detach and return it */
    q->tail = next;
    return tail;
  }
  gpr_mpscq_node *head = (gpr_mpscq_node *)gpr_atm_acq_load(&q->head);
  if (tail != head) {
    // indicates a retry is in order: we're still adding
    return NULL;
  }
  /* tail == head but tail->next is NULL: re-insert the stub so the last
     real node gains a successor and can be detached below */
  gpr_mpscq_push(q, &q->stub);
  next = (gpr_mpscq_node *)gpr_atm_acq_load(&tail->next);
  if (next != NULL) {
    q->tail = next;
    return tail;
  }
  // indicates a retry is in order: we're still adding
  return NULL;
}
static void test_thread(void *args) { thd_args *a = args; gpr_event_wait(a->start, gpr_inf_future(GPR_CLOCK_REALTIME)); for (size_t i = 1; i <= THREAD_ITERATIONS; i++) { gpr_mpscq_push(a->q, &new_node(i, &a->ctr)->node); } }
void grpc_workqueue_enqueue(grpc_exec_ctx *exec_ctx, grpc_workqueue *workqueue, grpc_closure *closure, grpc_error *error) { GPR_TIMER_BEGIN("workqueue.enqueue", 0); gpr_atm last = gpr_atm_full_fetch_add(&workqueue->state, 2); GPR_ASSERT(last & 1); closure->error = error; gpr_mpscq_push(&workqueue->queue, &closure->next_data.atm_next); if (last == 1) { wakeup(exec_ctx, workqueue); } GPR_TIMER_END("workqueue.enqueue", 0); }
static void test_serial(void) { gpr_log(GPR_DEBUG, "test_serial"); gpr_mpscq q; gpr_mpscq_init(&q); for (size_t i = 0; i < 10000000; i++) { gpr_mpscq_push(&q, &new_node(i, NULL)->node); } for (size_t i = 0; i < 10000000; i++) { test_node *n = (test_node *)gpr_mpscq_pop(&q); GPR_ASSERT(n); GPR_ASSERT(n->i == i); gpr_free(n); } }
/* Push a completion onto the completion queue's event queue.
   Returns true iff the previous item count was zero, i.e. this push took
   the queue from empty to non-empty (caller can use that to wake pollers).
   NOTE(review): the direct cast assumes the mpscq node is the completion's
   first member — confirm against grpc_cq_completion's layout. */
static bool cq_event_queue_push(grpc_cq_event_queue *q, grpc_cq_completion *c) {
  gpr_mpscq_node *node = (gpr_mpscq_node *)c;
  gpr_mpscq_push(&q->queue, node);
  gpr_atm prev_count = gpr_atm_no_barrier_fetch_add(&q->num_queue_items, 1);
  return prev_count == 0;
}