/*
 * Unlike a traditional lock-less linked list; the availability of a
 * rseq primitive allows us to implement pop without concerns over
 * ABA-type races.
 *
 * Pops the head node of the current CPU's list. Returns the popped
 * node, or NULL if that CPU's list is empty. On success, if @_cpu is
 * non-NULL, the CPU number the pop was performed on is stored there.
 */
struct percpu_list_node *this_cpu_list_pop(struct percpu_list *list, int *_cpu)
{
	for (;;) {
		struct percpu_list_node *head;
		intptr_t *targetptr, expectnot, *load;
		off_t offset;
		int ret, cpu;

		cpu = rseq_cpu_start();
		targetptr = (intptr_t *)&list->c[cpu].head;
		expectnot = (intptr_t)NULL;
		/* Byte offset of the "next" pointer within a list node. */
		offset = offsetof(struct percpu_list_node, next);
		load = (intptr_t *)&head;
		/*
		 * Within a single rseq critical section on @cpu: if
		 * *targetptr != expectnot (list non-empty), load the old
		 * head into *load and store the head's "next" pointer
		 * (read via @offset) back to *targetptr.
		 */
		ret = rseq_cmpnev_storeoffp_load(targetptr, expectnot, offset, load, cpu);
		if (rseq_likely(!ret)) {
			if (_cpu)
				*_cpu = cpu;
			return head;
		}
		/* Positive return: comparison matched NULL, i.e. empty list. */
		if (ret > 0)
			return NULL;
		/* Retry if rseq aborts. */
	}
}
/*
 * A simple percpu spinlock. Returns the cpu lock was acquired on.
 */
int rseq_this_cpu_lock(struct percpu_lock *lock)
{
	int cpu;
	int err;

	/* Spin until the 0 -> 1 transition commits on the current CPU. */
	do {
		cpu = rseq_cpu_start();
		err = rseq_cmpeqv_storev(&lock->c[cpu].v, 0, 1, cpu);
		/* Retry if comparison fails or rseq aborts. */
	} while (err);

	/*
	 * Acquire semantic when taking lock after control dependency.
	 * Matches rseq_smp_store_release().
	 */
	rseq_smp_acquire__after_ctrl_dep();
	return cpu;
}
void this_cpu_list_push(struct percpu_list *list, struct percpu_list_node *node, int *_cpu) { int cpu; for (;;) { intptr_t *targetptr, newval, expect; int ret; cpu = rseq_cpu_start(); /* Load list->c[cpu].head with single-copy atomicity. */ expect = (intptr_t)RSEQ_READ_ONCE(list->c[cpu].head); newval = (intptr_t)node; targetptr = (intptr_t *)&list->c[cpu].head; node->next = (struct percpu_list_node *)expect; ret = rseq_cmpeqv_storev(targetptr, expect, newval, cpu); if (rseq_likely(!ret)) break; /* Retry if comparison fails or rseq aborts. */ } if (_cpu) *_cpu = cpu; }
/*
 * Register the current thread with rseq, increment the global counter
 * @v NR_INCREMENTS times via the rseq_addv() fast path on the CPU
 * observed at start, then unregister and print the total.
 *
 * Any rseq_addv() failure (non-zero return, e.g. the thread migrated
 * away from @cpu) stops the loop early — matching the original
 * hand-unrolled behavior — so the printed total may be smaller than
 * NR_INCREMENTS.
 */
int main(int argc, char **argv)
{
	/* Replaces 28 identical hand-unrolled rseq_addv() call sites. */
	enum { NR_INCREMENTS = 28 };
	int cpu, ret, i;

	if (rseq_register_current_thread())
		abort();
	cpu = rseq_cpu_start();
	for (i = 0; i < NR_INCREMENTS; i++) {
		ret = rseq_addv(&v, 1, cpu);
		if (ret)
			goto end;
	}
end:
	if (rseq_unregister_current_thread())
		abort();
	printf("total %" PRIdPTR "\n", v);
	return 0;
}