/**
 * Try to take a ticket lock without spinning.
 *
 * Returns 1 when the lock was acquired, 0 when it was busy.
 *
 * Reading 'next_ticket' and 'cur_ticket' as two separate (non-atomic-pair)
 * loads is safe: 'cur_ticket' never exceeds 'next_ticket', so observing a
 * stale 'cur_ticket' can only make the lock look busy and the trylock fail,
 * never succeed incorrectly.
 */
int odp_ticketlock_trylock(odp_ticketlock_t *tklock)
{
	uint32_t ticket = odp_atomic_load_u32(&tklock->next_ticket);

	/* Lock is busy if someone already holds a ticket ahead of the
	 * current serving number; bail out without touching the lock. */
	if (ticket != odp_atomic_load_u32(&tklock->cur_ticket))
		return 0;

	/* Claim the lock by bumping 'next_ticket', but only if it still
	 * holds the value we saw (which equals 'cur_ticket'). There is no
	 * need to re-check 'cur_ticket': it can only be smaller than
	 * 'next_ticket' when the lock is busy, never larger. A CAS failure
	 * means another thread grabbed a ticket first, i.e. the lock is no
	 * longer free. Acquire ordering on success publishes the critical
	 * section correctly. */
	return odp_atomic_cas_acq_u32(&tklock->next_ticket,
				      &ticket, ticket + 1) ? 1 : 0;
}
/**
 * Acquire a reader-writer lock for reading (shared access).
 *
 * Spins until the reader count can be incremented. A negative count
 * (interpreted as int32_t) marks a writer holding the lock.
 */
void odp_rwlock_read_lock(odp_rwlock_t *rwlock)
{
	for (;;) {
		uint32_t readers = odp_atomic_load_u32(&rwlock->cnt);

		/* Writer active: back off and re-sample the counter. */
		if ((int32_t)readers < 0) {
			odp_cpu_pause();
			continue;
		}

		/* Register one more reader; acquire ordering on success.
		 * A failed CAS means the counter moved — retry. */
		if (odp_atomic_cas_acq_u32(&rwlock->cnt,
					   &readers, readers + 1))
			return;
	}
}
/**
 * Acquire a reader-writer lock for writing (exclusive access).
 *
 * Spins until the counter is zero (no readers, no writer), then swaps it
 * to (uint32_t)-1 to mark a writer holding the lock.
 */
void odp_rwlock_write_lock(odp_rwlock_t *rwlock)
{
	for (;;) {
		uint32_t expected = 0;

		/* Any non-zero count means readers or a writer are in;
		 * pause and poll again. */
		if (odp_atomic_load_u32(&rwlock->cnt) != 0) {
			odp_cpu_pause();
			continue;
		}

		/* Swap 0 -> -1 to take exclusive ownership; acquire
		 * ordering on success. Retry if another thread won. */
		if (odp_atomic_cas_acq_u32(&rwlock->cnt,
					   &expected, (uint32_t)-1))
			return;
	}
}