void * /* returns value associated with key */ hashtable_search(struct hashtable *h, void *k) { struct entry *e; unsigned int hashvalue, index; // Use global read lock for hashing/indexing rwlock_rdlock(&h->globallock); hashvalue = hash(h,k); index = indexFor(h->tablelength,hashvalue); rwlock_rdunlock(&h->globallock); // Use local read lock for searching rwlock_rdlock(&h->locks[index]); e = h->table[index]; while (NULL != e) { /* Check hash value to short circuit heavier comparison */ if ((hashvalue == e->h) && (h->eqfn(k, e->k))) { // Release local read lock for early return (key found) rwlock_rdunlock(&h->locks[index]); return e->v; } e = e->next; } // Release local read lock for late return (key not found) rwlock_rdunlock(&h->locks[index]); return NULL; }
unsigned int hashtable_count(struct hashtable *h) { // Use read lock for entry count rwlock_rdlock(&h->entrycountlock); unsigned int cnt = h->entrycount; rwlock_rdunlock(&h->entrycountlock); return cnt; }
/* Thread entry point: takes the shared read lock, prints the first ten
 * elements of the shared data array, then releases the lock. */
static void *readthread(void *p)
{
    threadata_t *td = (threadata_t *)p;

    rwlock_rdlock(&td->lock);
    log("read\n");
    for (int idx = 0; idx < 10; idx++) {
        printf("%d\n", td->data[idx]);
    }
    rwlock_rdunlock(&td->lock);
    log("Done read\n");

    return NULL;
}
int hashtable_insert(struct hashtable *h, void *k, void *v) { /* This method allows duplicate keys - but they shouldn't be used */ unsigned int index; struct entry *e; // Use write lock for entry count increment rwlock_wrlock(&h->entrycountlock); if (++(h->entrycount) > h->loadlimit) { rwlock_wrunlock(&h->entrycountlock); /* Ignore the return value. If expand fails, we should * still try cramming just this value into the existing table * -- we may not have memory for a larger table, but one more * element may be ok. Next time we insert, we'll try expanding again.*/ hashtable_expand(h); } else { rwlock_wrunlock(&h->entrycountlock); } e = (struct entry *)malloc(sizeof(struct entry)); if (NULL == e) { // Use write lock for entry count decrement rwlock_wrlock(&h->entrycountlock); --(h->entrycount); rwlock_wrunlock(&h->entrycountlock); return 0; } /*oom*/ // Use global read lock for hashing/index calculations rwlock_rdlock(&h->globallock); e->h = hash(h,k); index = indexFor(h->tablelength,e->h); e->k = k; e->v = v; rwlock_rdunlock(&h->globallock); // Use global write lock for list insertion // TODO: internal lock causes problems, figure out why, using global instead //rwlock_wrlock(&h->locks[index]); rwlock_wrlock(&h->globallock); #ifdef DEBUG printf("[%.8x indexer] inserting '%s' into index[%d]...\n", pthread_self(), k, index); #endif e->next = h->table[index]; h->table[index] = e; rwlock_wrunlock(&h->globallock); // TODO: internal lock causes problems, figure out why, using global instead //rwlock_wrunlock(&h->locks[index]); return -1; }
void * /* returns value associated with key */ hashtable_remove(struct hashtable *h, void *k) { /* TODO: consider compacting the table when the load factor drops enough, * or provide a 'compact' method. */ struct entry *e; struct entry **pE; void *v; unsigned int hashvalue, index; // Use global read lock for hashing/indexing rwlock_rdlock(&h->globallock); hashvalue = hash(h,k); index = indexFor(h->tablelength,hash(h,k)); rwlock_rdunlock(&h->globallock); // Use local write lock for removal rwlock_wrlock(&h->locks[index]); pE = &(h->table[index]); e = *pE; while (NULL != e) { /* Check hash value to short circuit heavier comparison */ if ((hashvalue == e->h) && (h->eqfn(k, e->k))) { *pE = e->next; // Use write lock for entry count decrement rwlock_wrlock(&h->entrycountlock); h->entrycount--; rwlock_wrunlock(&h->entrycountlock); v = e->v; freekey(e->k); free(e); rwlock_wrunlock(&h->locks[index]); return v; } pE = &(e->next); e = e->next; } rwlock_wrunlock(&h->locks[index]); return NULL; }
/* Release a read lock previously acquired on this list's rwlock. */
void ListRDUnlock(list_p list) { rwlock_rdunlock(list->rwlock); }