static inline void
decrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/* See incrementcounter() for the rationale of the "read" lock. */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, -1);
	/* If the lower 32-bit field wraps around, borrow from the higher. */
	if (prev == 0)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				-1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], -1);
#else
	UNUSED(prev);
	stats->counters[counter]--;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
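
For illustration, here is a minimal standalone sketch of the same split-counter borrow using C11 <stdatomic.h> rather than the ISC atomics; only the lo/hi layout mirrors the multifield case above, everything else is hypothetical. fetch_sub returns the value held before the subtraction, so a previous value of 0 means lo wrapped to 0xffffffff and a borrow must be taken from hi (e.g. 0x100000000 - 1 = 0x0ffffffff).

#include <stdatomic.h>
#include <stdint.h>

struct split_counter {
	_Atomic uint32_t lo;
	_Atomic uint32_t hi;
};

static void
split_decrement(struct split_counter *c) {
	/* fetch_sub returns the value held *before* the subtraction. */
	uint32_t prev = atomic_fetch_sub(&c->lo, 1);
	if (prev == 0) {
		/* lo wrapped from 0 to 0xffffffff: borrow from hi. */
		atomic_fetch_sub(&c->hi, 1);
	}
}
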
isc_result_t
isc_rwlock_tryupgrade(isc_rwlock_t *rwl) {
	isc_int32_t prevcnt;

	REQUIRE(VALID_RWLOCK(rwl));

	/* Try to acquire write access. */
	prevcnt = isc_atomic_cmpxchg(&rwl->cnt_and_flag,
				     READER_INCR, WRITER_ACTIVE);
	/*
	 * There must have been no writer, and there must have been at least
	 * one reader.
	 */
	INSIST((prevcnt & WRITER_ACTIVE) == 0 &&
	       (prevcnt & ~WRITER_ACTIVE) != 0);

	if (prevcnt == READER_INCR) {
		/*
		 * We are the only reader and have been upgraded.
		 * Now jump into the head of the writer waiting queue.
		 */
		(void)isc_atomic_xadd(&rwl->write_completions, -1);
	} else
		return (ISC_R_LOCKBUSY);

	return (ISC_R_SUCCESS);
}
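
A minimal caller-side sketch of the upgrade pattern, assuming an already initialized isc_rwlock_t and RUNTIME_CHECK from <isc/util.h>; the guarded state and the helper name are illustrative. Since tryupgrade fails whenever another reader holds the lock, the fallback must relock and revalidate:

static void
modify_if_needed(isc_rwlock_t *rwl) {
	isc_result_t result;

	RUNTIME_CHECK(isc_rwlock_lock(rwl, isc_rwlocktype_read) ==
		      ISC_R_SUCCESS);
	/* ... inspect shared state; suppose a modification is needed ... */
	result = isc_rwlock_tryupgrade(rwl);
	if (result == ISC_R_LOCKBUSY) {
		/*
		 * Other readers are active.  Drop the read lock, take a
		 * write lock, and revalidate: the state may have changed
		 * in the unlocked window.
		 */
		isc_rwlock_unlock(rwl, isc_rwlocktype_read);
		RUNTIME_CHECK(isc_rwlock_lock(rwl, isc_rwlocktype_write) ==
			      ISC_R_SUCCESS);
	}
	/* ... modify shared state under the write lock ... */
	isc_rwlock_unlock(rwl, isc_rwlocktype_write);
}
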
void
isc_rwlock_downgrade(isc_rwlock_t *rwl) {
	isc_int32_t prev_readers;

	REQUIRE(VALID_RWLOCK(rwl));

	/* Become an active reader. */
	prev_readers = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
	/* We must have been a writer. */
	INSIST((prev_readers & WRITER_ACTIVE) != 0);

	/* Complete write */
	(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
	(void)isc_atomic_xadd(&rwl->write_completions, 1);

	/* Resume other readers */
	LOCK(&rwl->lock);
	if (rwl->readers_waiting > 0)
		BROADCAST(&rwl->readable);
	UNLOCK(&rwl->lock);
}
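
The complementary pattern, sketched under the same assumptions as above (initialized rwl, illustrative helper name). Unlike tryupgrade, downgrade cannot fail, but the matching unlock must then use the read type:

static void
update_then_read(isc_rwlock_t *rwl) {
	RUNTIME_CHECK(isc_rwlock_lock(rwl, isc_rwlocktype_write) ==
		      ISC_R_SUCCESS);
	/* ... perform the update exclusively ... */
	isc_rwlock_downgrade(rwl);
	/*
	 * The lock is still held, but now in read mode: other readers
	 * may join, and nobody can modify the state underneath us.
	 */
	/* ... keep reading the freshly written state ... */
	isc_rwlock_unlock(rwl, isc_rwlocktype_read);
}
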
static inline void
incrementcounter(isc_stats_t *stats, int counter) {
	isc_int32_t prev;

#ifdef ISC_RWLOCK_USEATOMIC
	/*
	 * We use a "read" lock to prevent other threads from reading the
	 * counter while we are writing a counter field.  The write access
	 * itself is protected by the atomic operation.
	 */
	isc_rwlock_lock(&stats->counterlock, isc_rwlocktype_read);
#endif

#if ISC_STATS_USEMULTIFIELDS
	prev = isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].lo, 1);
	/*
	 * If the lower 32-bit field overflows, increment the higher field.
	 * Note that it's *theoretically* possible that the lower field
	 * wraps around again before the higher field is incremented.  It
	 * doesn't matter, however, because we don't read the value until
	 * isc_stats_copy() is called, where the whole process is protected
	 * by the write (exclusive) lock.
	 */
	if (prev == (isc_int32_t)0xffffffff)
		isc_atomic_xadd((isc_int32_t *)&stats->counters[counter].hi,
				1);
#elif defined(ISC_PLATFORM_HAVEXADDQ)
	UNUSED(prev);
	isc_atomic_xaddq((isc_int64_t *)&stats->counters[counter], 1);
#else
	UNUSED(prev);
	stats->counters[counter]++;
#endif

#ifdef ISC_RWLOCK_USEATOMIC
	isc_rwlock_unlock(&stats->counterlock, isc_rwlocktype_read);
#endif
}
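
In the multifield case, the read side relies on that exclusive lock rather than a 64-bit atomic. A hypothetical helper (not the actual isc_stats_copy(), and assuming hi/lo are the 32-bit fields used above) would combine the halves only while the caller holds counterlock as a writer, so no concurrent xadd can touch either field:

static isc_uint64_t
read_counter64(isc_stats_t *stats, int counter) {
	isc_uint64_t value;

	/* Caller holds counterlock in write (exclusive) mode. */
	value = (isc_uint64_t)(isc_uint32_t)stats->counters[counter].hi << 32;
	value |= (isc_uint32_t)stats->counters[counter].lo;
	return (value);
}
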
void
do_xadd(isc_task_t *task, isc_event_t *ev) {
	counter_t *state = (counter_t *)ev->ev_arg;
	int i;

	/* Hammer the shared counter with atomic increments. */
	for (i = 0; i < COUNTS_PER_ITERATION; i++) {
		isc_atomic_xadd(&counter_32, 1);
	}

	state->iteration++;
	if (state->iteration < ITERATIONS) {
		/* Re-post the same event to this task for another pass. */
		isc_task_send(task, &ev);
	} else {
		isc_event_free(&ev);
	}
}
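
Assuming, hypothetically, that the test seeds NTASKS tasks with one such event each, every task re-posts its event ITERATIONS times and adds COUNTS_PER_ITERATION per pass, so a teardown check (names illustrative, not the actual test harness) could assert:

/* After all tasks have drained their events: */
INSIST(counter_32 ==
       (isc_int32_t)(NTASKS * ITERATIONS * COUNTS_PER_ITERATION));
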
isc_result_t
isc_rwlock_unlock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t prev_cnt;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PREUNLOCK, "preunlock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		prev_cnt = isc_atomic_xadd(&rwl->cnt_and_flag, -READER_INCR);

		/*
		 * If we're the last reader and any writers are waiting, wake
		 * them up.  We need to wake up all of them to ensure the
		 * FIFO order.
		 */
		if (prev_cnt == READER_INCR &&
		    rwl->write_completions != rwl->write_requests) {
			LOCK(&rwl->lock);
			BROADCAST(&rwl->writeable);
			UNLOCK(&rwl->lock);
		}
	} else {
		isc_boolean_t wakeup_writers = ISC_TRUE;

		/*
		 * Reset the flag, and (implicitly) tell other writers
		 * we are done.
		 */
		(void)isc_atomic_xadd(&rwl->cnt_and_flag, -WRITER_ACTIVE);
		(void)isc_atomic_xadd(&rwl->write_completions, 1);

		if (rwl->write_granted >= rwl->write_quota ||
		    rwl->write_requests == rwl->write_completions ||
		    (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0) {
			/*
			 * We have passed the write quota, no writer is
			 * waiting, or some readers are almost ready, pending
			 * possible writers.  Note that the last case can
			 * happen even if write_requests != write_completions
			 * (which means a new writer in the queue), so we need
			 * to catch the case explicitly.
			 */
			LOCK(&rwl->lock);
			if (rwl->readers_waiting > 0) {
				wakeup_writers = ISC_FALSE;
				BROADCAST(&rwl->readable);
			}
			UNLOCK(&rwl->lock);
		}

		if (rwl->write_requests != rwl->write_completions &&
		    wakeup_writers) {
			LOCK(&rwl->lock);
			BROADCAST(&rwl->writeable);
			UNLOCK(&rwl->lock);
		}
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTUNLOCK, "postunlock"),
		   rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
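
The writer-unlock branch packs three wakeup conditions into one test. As a reading aid only, the same policy can be distilled into a predicate (field names follow the ISC struct; this helper does not exist in the source): waiting readers are preferred when the writer quota has been spent, when no writer is waiting, or when readers have already registered themselves in cnt_and_flag.

static isc_boolean_t
prefer_waiting_readers(isc_rwlock_t *rwl) {
	return (ISC_TF(rwl->write_granted >= rwl->write_quota ||
		       rwl->write_requests == rwl->write_completions ||
		       (rwl->cnt_and_flag & ~WRITER_ACTIVE) != 0));
}
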
isc_result_t
isc_rwlock_trylock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t cntflag;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		/* If a writer is waiting or working, we fail. */
		if (rwl->write_requests != rwl->write_completions)
			return (ISC_R_LOCKBUSY);

		/* Otherwise, be ready for reading. */
		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
		if ((cntflag & WRITER_ACTIVE) != 0) {
			/*
			 * A writer is working.  We lose, and cancel the read
			 * request.
			 */
			cntflag = isc_atomic_xadd(&rwl->cnt_and_flag,
						  -READER_INCR);
			/*
			 * If no other readers are waiting and we've suspended
			 * new writers in this short period, wake them up.
			 */
			if (cntflag == READER_INCR &&
			    rwl->write_completions != rwl->write_requests) {
				LOCK(&rwl->lock);
				BROADCAST(&rwl->writeable);
				UNLOCK(&rwl->lock);
			}

			return (ISC_R_LOCKBUSY);
		}
	} else {
		/* Try locking without entering the waiting queue. */
		cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
					     WRITER_ACTIVE);
		if (cntflag != 0)
			return (ISC_R_LOCKBUSY);

		/*
		 * XXXJT: jump into the queue, possibly breaking the writer
		 * order.
		 */
		(void)isc_atomic_xadd(&rwl->write_completions, -1);

		rwl->write_granted++;
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
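
A sketch of non-blocking use (same assumptions as the earlier sketches: initialized rwl, illustrative helper name). The caller must be prepared for ISC_R_LOCKBUSY and should do other work rather than spin, since trylock deliberately bypasses the FIFO queue:

static isc_boolean_t
try_update(isc_rwlock_t *rwl) {
	isc_result_t result;

	result = isc_rwlock_trylock(rwl, isc_rwlocktype_write);
	if (result == ISC_R_LOCKBUSY) {
		/* ... defer the work or fall back to a blocking lock ... */
		return (ISC_FALSE);
	}
	RUNTIME_CHECK(result == ISC_R_SUCCESS);
	/* ... modify shared state ... */
	isc_rwlock_unlock(rwl, isc_rwlocktype_write);
	return (ISC_TRUE);
}
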
static isc_result_t
isc__rwlock_lock(isc_rwlock_t *rwl, isc_rwlocktype_t type) {
	isc_int32_t cntflag;

	REQUIRE(VALID_RWLOCK(rwl));

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_PRELOCK, "prelock"), rwl, type);
#endif

	if (type == isc_rwlocktype_read) {
		if (rwl->write_requests != rwl->write_completions) {
			/* There is a waiting or active writer. */
			LOCK(&rwl->lock);
			if (rwl->write_requests != rwl->write_completions) {
				rwl->readers_waiting++;
				WAIT(&rwl->readable, &rwl->lock);
				rwl->readers_waiting--;
			}
			UNLOCK(&rwl->lock);
		}

		cntflag = isc_atomic_xadd(&rwl->cnt_and_flag, READER_INCR);
		POST(cntflag);
		while (1) {
			if ((rwl->cnt_and_flag & WRITER_ACTIVE) == 0)
				break;

			/* A writer is still working. */
			LOCK(&rwl->lock);
			rwl->readers_waiting++;
			if ((rwl->cnt_and_flag & WRITER_ACTIVE) != 0)
				WAIT(&rwl->readable, &rwl->lock);
			rwl->readers_waiting--;
			UNLOCK(&rwl->lock);

			/*
			 * Typically, the reader should be able to get a lock
			 * at this stage:
			 *   (1) there should have been no pending writer when
			 *       the reader was trying to increment the
			 *       counter; otherwise, the writer should be in
			 *       the waiting queue, preventing the reader from
			 *       proceeding to this point.
			 *   (2) once the reader increments the counter, no
			 *       more writer can get a lock.
			 * Still, it is possible another writer can work at
			 * this point, e.g. in the following scenario:
			 *   A previous writer unlocks the writer lock.
			 *   This reader proceeds to point (1).
			 *   A new writer appears, and gets a new lock before
			 *   the reader increments the counter.
			 *   The reader then increments the counter.
			 *   The previous writer notices there is a waiting
			 *   reader who is almost ready, and wakes it up.
			 * So, the reader needs to confirm whether it can now
			 * read explicitly (thus we loop).  Note that this is
			 * not an infinite process, since the reader has
			 * incremented the counter at this point.
			 */
		}

		/*
		 * If we are temporarily preferred to writers due to the
		 * writer quota, reset the condition (race among readers
		 * doesn't matter).
		 */
		rwl->write_granted = 0;
	} else {
		isc_int32_t prev_writer;

		/* Enter the waiting queue, and wait for our turn. */
		prev_writer = isc_atomic_xadd(&rwl->write_requests, 1);
		while (rwl->write_completions != prev_writer) {
			LOCK(&rwl->lock);
			if (rwl->write_completions != prev_writer) {
				WAIT(&rwl->writeable, &rwl->lock);
				UNLOCK(&rwl->lock);
				continue;
			}
			UNLOCK(&rwl->lock);
			break;
		}

		while (1) {
			cntflag = isc_atomic_cmpxchg(&rwl->cnt_and_flag, 0,
						     WRITER_ACTIVE);
			if (cntflag == 0)
				break;

			/* Another active reader or writer is working. */
			LOCK(&rwl->lock);
			if (rwl->cnt_and_flag != 0)
				WAIT(&rwl->writeable, &rwl->lock);
			UNLOCK(&rwl->lock);
		}

		INSIST((rwl->cnt_and_flag & WRITER_ACTIVE) != 0);
		rwl->write_granted++;
	}

#ifdef ISC_RWLOCK_TRACE
	print_lock(isc_msgcat_get(isc_msgcat, ISC_MSGSET_RWLOCK,
				  ISC_MSG_POSTLOCK, "postlock"), rwl, type);
#endif

	return (ISC_R_SUCCESS);
}
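
Putting the pieces together, a minimal lifecycle sketch using the public entry points (isc_rwlock_lock() is the public wrapper around isc__rwlock_lock() above; includes omitted, error handling abbreviated with RUNTIME_CHECK):

static void
rwlock_lifecycle(void) {
	isc_rwlock_t rwl;

	/* Quotas of 0, 0 select the implementation defaults. */
	RUNTIME_CHECK(isc_rwlock_init(&rwl, 0, 0) == ISC_R_SUCCESS);

	RUNTIME_CHECK(isc_rwlock_lock(&rwl, isc_rwlocktype_read) ==
		      ISC_R_SUCCESS);
	/* ... read shared state; other readers may run concurrently ... */
	isc_rwlock_unlock(&rwl, isc_rwlocktype_read);

	RUNTIME_CHECK(isc_rwlock_lock(&rwl, isc_rwlocktype_write) ==
		      ISC_R_SUCCESS);
	/* ... modify shared state exclusively ... */
	isc_rwlock_unlock(&rwl, isc_rwlocktype_write);

	isc_rwlock_destroy(&rwl);
}
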