/*
 * Micro-benchmark: cycles per uncontended ck_rwlock write acquire/release
 * and read acquire/release pair. Each timed loop is preceded by an
 * identical untimed loop to warm caches and the branch predictor.
 */
int
main(void)
{
	ck_rwlock_t rwlock = CK_RWLOCK_INITIALIZER;
	uint64_t begin, end, iter;

	/* Warm-up: write lock/unlock, untimed. */
	for (iter = 0; iter < STEPS; iter++) {
		ck_rwlock_write_lock(&rwlock);
		ck_rwlock_write_unlock(&rwlock);
	}

	/* Timed write lock/unlock pairs. */
	begin = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_rwlock_write_lock(&rwlock);
		ck_rwlock_write_unlock(&rwlock);
	}
	end = rdtsc();
	printf("WRITE: rwlock %15" PRIu64 "\n", (end - begin) / STEPS);

	/* Warm-up: read lock/unlock, untimed. */
	for (iter = 0; iter < STEPS; iter++) {
		ck_rwlock_read_lock(&rwlock);
		ck_rwlock_read_unlock(&rwlock);
	}

	/* Timed read lock/unlock pairs. */
	begin = rdtsc();
	for (iter = 0; iter < STEPS; iter++) {
		ck_rwlock_read_lock(&rwlock);
		ck_rwlock_read_unlock(&rwlock);
	}
	end = rdtsc();
	printf("READ: rwlock %15" PRIu64 "\n", (end - begin) / STEPS);

	return (0);
}
/*
 * Look up the segment for `segment_number` and, if it is currently in the
 * WRITING state, hand it back with its refcount bumped. Returns NULL when
 * the segment has already left the list or left the WRITING state (slow
 * caller); aborts via ensure() if the segment was never allocated at all.
 * Caller owns one reference on a non-NULL return and must release it.
 */
segment_t* _segment_list_get_segment_for_writing(struct segment_list *segment_list, uint32_t segment_number) {
    // We will never modify the segment list in this function, so we can take a read lock
    ck_rwlock_read_lock(segment_list->lock);
    segment_t *segment = __segment_number_to_segment(segment_list, segment_number);
    // Make sure we are not trying to get a segment before it has been allocated.  Getting a segment
    // anytime after it was allocated can easily happen because of a slow thread, but getting it
    // before it has been allocated should not happen.
    // NOTE(review): ensure() presumably aborts the process on failure, so the read lock
    // is intentionally not released on this path — confirm against ensure()'s definition.
    ensure(segment_number < segment_list->head, "Attempted to get a segment before it was allocated");
    // This segment is outside the list
    // TODO: More specific error handling
    if (!__is_segment_number_in_segment_list_inlock(segment_list, segment_number)) {
        ck_rwlock_read_unlock(segment_list->lock);
        return NULL;
    }
    // If this segment is not in the WRITING state, we may have just been too slow, so return NULL
    // rather than asserting to give the caller an opportunity to recover
    // TODO: More specific error handling
    if (segment->state != WRITING) {
        ck_rwlock_read_unlock(segment_list->lock);
        return NULL;
    }
    // Increment the refcount of the newly initialized segment since we are returning it.
    // Atomic increment: refcount is shared with threads that only hold the read lock.
    ck_pr_inc_32(&segment->refcount);
    ck_rwlock_read_unlock(segment_list->lock);
    return segment;
}
/*
 * Look up `key` in `hash` under the per-bucket read lock.
 *
 * Returns a duplicated copy of the stored value (caller owns it and must
 * free it via the datum API), or NULL when the key is not present. The
 * duplication happens while the lock is held so the returned datum cannot
 * be torn by a concurrent writer.
 *
 * Fix: the original checked `bucket == NULL` right after
 * `bucket = &hash->node[i]` — the address of an array element is never
 * NULL, so that guard was dead code and has been removed. An empty bucket
 * is detected by its NULL key inside the traversal loop instead.
 */
datum_t *
hash_lookup (datum_t *key, hash_t * hash)
{
   size_t i;
   datum_t *val;
   node_t *bucket;

   i = hashval(key, hash);

   ck_rwlock_read_lock(&hash->lock[i]);

   for (bucket = &hash->node[i]; bucket != NULL; bucket = bucket->next)
      {
         /* bucket->key is NULL for an unused head slot; skip those. */
         if (bucket->key && hash_keycmp(hash, key, bucket->key))
            {
               /* Copy under the lock so the value cannot change underneath us. */
               val = datum_dup( bucket->val );
               ck_rwlock_read_unlock(&hash->lock[i]);
               return val;
            }
      }

   ck_rwlock_read_unlock(&hash->lock[i]);
   return NULL;
}
/*
 * Resolve a hook point by name. The fast path is a lookup under the read
 * lock; when the name is absent and `create` is set, we retry under the
 * write lock (another thread may have inserted it in the window between
 * the two locks) and only then allocate and register a new hook point.
 * Returns NULL when the name is absent and `create` is false, or when
 * allocation/insertion fails.
 */
ph_hook_point_t *ph_hook_point_get(ph_string_t *name, bool create)
{
  ph_hook_point_t *hp = NULL;

  /* Optimistic read-locked probe. */
  ck_rwlock_read_lock(&rwlock);
  ph_ht_lookup(&hook_hash, &name, &hp, false);
  ck_rwlock_read_unlock(&rwlock);

  if (hp != NULL || !create) {
    return hp;
  }

  ck_rwlock_write_lock(&rwlock);
  /* Re-check: another thread may have created it while we were unlocked. */
  ph_ht_lookup(&hook_hash, &name, &hp, false);
  if (hp == NULL) {
    hp = ph_mem_alloc(mt.hookpoint);
    if (hp != NULL && ph_ht_set(&hook_hash, &name, &hp) != PH_OK) {
      /* Insertion failed: roll back the allocation. */
      ph_mem_free(mt.hookpoint, hp);
      hp = NULL;
    }
  }
  ck_rwlock_write_unlock(&rwlock);

  return hp;
}
/*
 * Drop one reader reference on `segment_number`. The segment must still be
 * in the list and in a readable state (READING or similar — anything but
 * FREE, CLOSED, or WRITING); violations abort via ensure(). Always returns 0.
 */
int _segment_list_release_segment_for_reading(struct segment_list *segment_list, uint32_t segment_number) {
    // Read lock is sufficient: we only touch the segment's atomic refcount,
    // never the list structure itself.
    ck_rwlock_read_lock(segment_list->lock);
    segment_t *segment = __segment_number_to_segment(segment_list, segment_number);
    // TODO: make this an actual error
    ensure(__is_segment_number_in_segment_list_inlock(segment_list, segment_number), "Attempted to release a segment not in the list");
    ensure(segment->state != FREE, "Attempted to release segment in the FREE state");
    ensure(segment->state != CLOSED, "Attempted to release segment in the CLOSED state");
    ensure(segment->state != WRITING, "Attempted to release reading segment in the WRITING state");
    // Atomic decrement: pairs with the ck_pr_inc_32 taken when the segment
    // was handed out for reading.
    ck_pr_dec_32(&segment->refcount);
    ck_rwlock_read_unlock(segment_list->lock);
    return 0;
}
/*
 * Apply `func(key, val, arg)` to every populated entry in `hash`, bucket by
 * bucket, each bucket traversed under its own read lock. Iteration stops as
 * soon as `func` returns non-zero; that value is returned (0 if the walk
 * completed).
 *
 * Fix: the original inner-loop condition included `bucket->key != NULL`,
 * which terminated the whole chain at the first key-less node and made the
 * `if (bucket->key == NULL) continue;` inside the body unreachable — any
 * entries chained after an empty head slot were silently skipped. The
 * NULL-key test now lives only inside the body, so such nodes are skipped
 * rather than ending the traversal.
 */
int
hash_foreach (hash_t * hash, int (*func)(datum_t *, datum_t *, void *), void *arg)
{
   int stop = 0;
   size_t i;
   node_t *bucket;

   for (i = 0; i < hash->size && !stop; i++)
      {
         ck_rwlock_read_lock(&hash->lock[i]);
         for (bucket = &hash->node[i]; bucket != NULL; bucket = bucket->next)
            {
               /* Unused head slot (or cleared node): skip, keep walking the chain. */
               if (bucket->key == NULL)
                  continue;
               stop = func(bucket->key, bucket->val, arg);
               if (stop)
                  break;
            }
         ck_rwlock_read_unlock(&hash->lock[i]);
      }
   return stop;
}
/*
 * Benchmark worker: measures the average cycle cost of an uncontended-style
 * read lock/unlock pair on the shared rwlock `rw.lock`.
 *
 * `pun` points at a uint64_t slot where the per-thread average is stored.
 * The thread pins itself to a CPU, rendezvouses with the other threads on
 * `barrier`, then repeatedly times a block of 16 hand-unrolled
 * lock/unlock pairs (the unroll keeps loop overhead out of the measurement;
 * `>> 4` divides the elapsed cycles by 16). It runs until the main thread
 * sets `flag`, re-synchronizes on the barrier, and writes the mean pair
 * cost through `pun`. Always returns NULL (pthread signature).
 */
static void *
thread_lock(void *pun)
{
	uint64_t s_b, e_b, a, i;
	uint64_t *value = pun;

	/* Pin this thread to its assigned CPU; timing is meaningless if it migrates. */
	if (aff_iterate(&affinity) != 0) {
		perror("ERROR: Could not affine thread");
		exit(EXIT_FAILURE);
	}

	/* Start barrier: wait until all `threads` workers are ready. */
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads)
		ck_pr_stall();

	/* i counts timed blocks (starts at 1 so the final division is safe);
	 * a accumulates the per-pair cycle cost of each block. */
	for (i = 1, a = 0;; i++) {
		s_b = rdtsc();
		/* 16 manually unrolled read lock/unlock pairs — do not loop-ify,
		 * the unroll is what keeps branch overhead out of the sample. */
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		ck_rwlock_read_lock(&rw.lock);
		ck_rwlock_read_unlock(&rw.lock);
		e_b = rdtsc();

		/* Elapsed cycles / 16 = cost of one lock/unlock pair this block. */
		a += (e_b - s_b) >> 4;

		/* Main thread raises `flag` to end the measurement phase. */
		if (ck_pr_load_uint(&flag) == 1)
			break;
	}

	/* End barrier: wait for every worker to finish measuring. */
	ck_pr_inc_int(&barrier);
	while (ck_pr_load_int(&barrier) != threads * 2)
		ck_pr_stall();

	/* Report this thread's mean pair cost. */
	*value = (a / i);
	return NULL;
}
/*
 * Report whether the segment list currently holds no segments, i.e. its
 * head and tail indices coincide. The comparison is made under the read
 * lock so it observes a consistent head/tail pair; the answer may of
 * course be stale by the time the caller acts on it.
 */
bool _segment_list_is_empty(struct segment_list *segment_list) {
    bool empty;

    ck_rwlock_read_lock(segment_list->lock);
    empty = (segment_list->head == segment_list->tail);
    ck_rwlock_read_unlock(segment_list->lock);

    return empty;
}
/*
 * Read the current value of a persistent atomic value. The atomic load is
 * performed while holding the read lock, so the result is consistent with
 * any writer that updates the value under the write lock.
 */
uint32_t _get_value(persistent_atomic_value_t *pav) {
    uint32_t snapshot;

    ck_rwlock_read_lock(pav->_lock);
    snapshot = ck_pr_load_32(&pav->_current_value);
    ck_rwlock_read_unlock(pav->_lock);

    return snapshot;
}