Example #1
int
hashtable_insert(struct hashtable *h, void *k, void *v)
{
    /* This function allows duplicate keys, but callers should avoid inserting them */
    unsigned int index;
    struct entry *e;

    // Use write lock for entry count increment 
    rwlock_wrlock(&h->entrycountlock);
    if (++(h->entrycount) > h->loadlimit)
    {
        rwlock_wrunlock(&h->entrycountlock);

        /* Ignore the return value. If expand fails, we should
         * still try cramming just this value into the existing table
         * -- we may not have memory for a larger table, but one more
         * element may be ok. Next time we insert, we'll try expanding again.*/
        hashtable_expand(h);
    } else {
        rwlock_wrunlock(&h->entrycountlock);
    }

    e = (struct entry *)malloc(sizeof(struct entry));
    if (NULL == e) {
        // Use write lock for entry count decrement 
        rwlock_wrlock(&h->entrycountlock);
        --(h->entrycount);
        rwlock_wrunlock(&h->entrycountlock);
        return 0;
    } /*oom*/

    // Use global read lock for hashing/index calculations
    rwlock_rdlock(&h->globallock);
    e->h = hash(h,k);
    index = indexFor(h->tablelength,e->h);
    e->k = k;
    e->v = v;
    rwlock_rdunlock(&h->globallock);

    // Use global write lock for list insertion
    // TODO: internal lock causes problems, figure out why, using global instead
    //rwlock_wrlock(&h->locks[index]);
    rwlock_wrlock(&h->globallock);
#ifdef DEBUG 
    printf("[%.8x indexer] inserting '%s' into index[%d]...\n", pthread_self(), k, index);
#endif 
    e->next = h->table[index];
    h->table[index] = e;
    rwlock_wrunlock(&h->globallock);
    // TODO: internal lock causes problems, figure out why, using global instead
    //rwlock_wrunlock(&h->locks[index]);

    return -1; /* non-zero return indicates success */
}
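
The rwlock_rdlock/rwlock_wrlock/rwlock_rdunlock/rwlock_wrunlock calls used throughout these examples look like thin wrappers over POSIX rwlocks. Below is a minimal sketch of plausible wrapper bodies, assuming they simply forward to pthread_rwlock_* and abort on error in the same perror/exit style as Example #6; the actual implementations in these codebases are not shown.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical wrapper bodies: forward to POSIX and abort on error. */
static void rwlock_rdlock(pthread_rwlock_t *l)
{
    if (pthread_rwlock_rdlock(l)) { perror("pthread_rwlock_rdlock"); exit(1); }
}

static void rwlock_wrlock(pthread_rwlock_t *l)
{
    if (pthread_rwlock_wrlock(l)) { perror("pthread_rwlock_wrlock"); exit(1); }
}

/* POSIX has a single unlock call, so both unlock flavors map onto it. */
static void rwlock_rdunlock(pthread_rwlock_t *l)
{
    if (pthread_rwlock_unlock(l)) { perror("pthread_rwlock_unlock"); exit(1); }
}

static void rwlock_wrunlock(pthread_rwlock_t *l)
{
    if (pthread_rwlock_unlock(l)) { perror("pthread_rwlock_unlock"); exit(1); }
}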
Example #2
void * /* returns value associated with key */
hashtable_remove(struct hashtable *h, void *k)
{
    /* TODO: consider compacting the table when the load factor drops enough,
     *       or provide a 'compact' method. */

    struct entry *e;
    struct entry **pE;
    void *v;
    unsigned int hashvalue, index;

    // Use global read lock for hashing/indexing
    rwlock_rdlock(&h->globallock);
    hashvalue = hash(h,k);
    index = indexFor(h->tablelength,hash(h,k));
    rwlock_rdunlock(&h->globallock);

    // Use local write lock for removal
    rwlock_wrlock(&h->locks[index]);
    pE = &(h->table[index]);
    e = *pE;
    while (NULL != e)
    {
        /* Check hash value to short circuit heavier comparison */
        if ((hashvalue == e->h) && (h->eqfn(k, e->k)))
        {
            *pE = e->next;

            // Use write lock for entry count decrement
            rwlock_wrlock(&h->entrycountlock);
            h->entrycount--;
            rwlock_wrunlock(&h->entrycountlock);

            v = e->v;
            freekey(e->k);
            free(e);

            rwlock_wrunlock(&h->locks[index]);
            return v;
        }
        pE = &(e->next);
        e = e->next;
    }
    rwlock_wrunlock(&h->locks[index]);

    return NULL;
}
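
For context, here is a hedged sketch of how the insert/remove pair might be exercised. create_hashtable and the hash/equality callbacks follow the common Clark-style hashtable API and are assumptions, not code from the examples above.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Hypothetical caller; h is assumed to come from a Clark-style
 * create_hashtable(minsize, hashfn, eqfn) constructor. */
static void demo(struct hashtable *h)
{
    char *k = strdup("alpha");
    char *v = strdup("one");

    if (!hashtable_insert(h, k, v))               /* 0 signals out of memory */
        fprintf(stderr, "insert failed\n");

    char *removed = hashtable_remove(h, "alpha"); /* stored value, or NULL */
    if (removed)
        free(removed);  /* the key itself is freed inside hashtable_remove */
}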
Example #3
static void *writethread(void *p) {
    int i;
    threadata_t *d = (threadata_t *)p;
    log("write\n");
    for (i = 0; i < 10; i++) {
        int j = rand() % (i + 1) + 1;
        d->data[i] = j;
    }
    rwlock_wrunlock(&d->lock);
    log("Done write\n");
    return NULL;
}
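
writethread releases a write lock it never acquires, so the lock is presumably taken by the spawning code before the thread starts, turning the rwlock into a one-shot handoff: readers block until the writer finishes. Below is a sketch of that pattern under those assumptions; rwlock_init and the threadata_t layout are guesses. Note that POSIX rwlocks require the unlocking thread to be the holder, so this pattern only works if the rwlock_* wrappers are a custom implementation that permits cross-thread release.

/* Hypothetical spawner for writethread under the handoff reading. */
static void spawn_writer(void)
{
    threadata_t d;
    pthread_t tid;

    rwlock_init(&d.lock);    /* hypothetical init wrapper */
    rwlock_wrlock(&d.lock);  /* held across thread start; writethread releases it */
    pthread_create(&tid, NULL, writethread, &d);

    rwlock_rdlock(&d.lock);  /* blocks until writethread calls rwlock_wrunlock */
    /* ... d.data is safe to read here ... */
    rwlock_rdunlock(&d.lock);
    pthread_join(tid, NULL);
}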
Example #4
int repo_commit(repo* rep, const char* branchpath) {
	int err = 0;
	char* srcpath = gen_malloc(MAX_PATH_LEN);
	if (!srcpath) {
		err = -ENOMEM;
		goto exit;
	}
	char* dstpath = gen_malloc(MAX_PATH_LEN);
	if (!dstpath) {
		err = -ENOMEM;
		goto exit;
	}

	// Quiesce all FS activity and wait for outstanding meta-data updates
	// to the underlying FS to flush.
	rwlock_wrlock(&rep->fslock);
	sync();

	// All objects in stage are now frozen, they can be moved into
	// the globally shared object stores.
	gen_sprintf(srcpath, "%s/stage/objs", branchpath);
	gen_sprintf(dstpath, "%s/objs", rep->repo);
	err = moveobjects(dstpath, srcpath);
	if (err)
		goto exit_unlock;

	// Now move the staged root into the set of old roots
	uint64_t id = repo_newid(rep);
	gen_sprintf(srcpath, "%s/stage/root", branchpath);
	gen_sprintf(dstpath, "%s/oldroots/i%lu", branchpath, id);
	err = gen_rename(srcpath, dstpath);

exit_unlock:
	rwlock_wrunlock(&rep->fslock);
exit:
	if (srcpath)
		gen_free(srcpath);
	if (dstpath)
		gen_free(dstpath);
	return err;
}
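
repo_commit takes rep->fslock exclusively to quiesce the filesystem, which implies that ordinary FS activity holds the same lock shared. Here is a sketch of what that read side might look like; repo_write_object and writefile are hypothetical names, not functions from this codebase.

/* Hypothetical read-side counterpart: routine FS operations share the
 * lock, so they run concurrently with each other but are excluded
 * while repo_commit holds fslock for writing. */
int repo_write_object(repo* rep, const char* path, const void* buf, size_t len)
{
	int err;

	rwlock_rdlock(&rep->fslock);
	err = writefile(path, buf, len);  /* hypothetical helper */
	rwlock_rdunlock(&rep->fslock);

	return err;
}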
Example #5
void ListWRUnlock(list_p list)
{
  rwlock_wrunlock(list->rwlock);
}
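
ListWRUnlock reads like one member of a family of wrappers. Plausible siblings, patterned on it, are sketched below; the names and bodies are guesses based on the rwlock_* calls seen in the other examples.

/* Hypothetical companions to ListWRUnlock, following the same pattern. */
void ListWRLock(list_p list)
{
  rwlock_wrlock(list->rwlock);
}

void ListRDLock(list_p list)
{
  rwlock_rdlock(list->rwlock);
}

void ListRDUnlock(list_p list)
{
  rwlock_rdunlock(list->rwlock);
}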
Example #6
static int
hashtable_expand(struct hashtable *h)
{
    // Acquire global write lock for entire function
    rwlock_wrlock(&h->globallock);

    /* Double the size of the table to accommodate more entries */
    struct entry **newtable;
    struct entry *e;
    struct entry **pE;
    unsigned int newsize, i, index;
    /* Check we're not hitting max capacity */
    if (h->primeindex == (prime_table_length - 1)) {
        // Release global write lock for early return
        rwlock_wrunlock(&h->globallock);
        return 0;
    }
    newsize = primes[++(h->primeindex)];

    newtable = (struct entry **)malloc(sizeof(struct entry*) * newsize);
    if (NULL != newtable)
    {
        memset(newtable, 0, newsize * sizeof(struct entry *));
        /* This algorithm is not 'stable', i.e. it reverses each chain
         * as it transfers entries between the tables */
        for (i = 0; i < h->tablelength; i++) {
            while (NULL != (e = h->table[i])) {
                h->table[i] = e->next;
                index = indexFor(newsize,e->h);
                e->next = newtable[index];
                newtable[index] = e;
            }
        }
        free(h->table);
        h->table = newtable;
    }
    /* Plan B: realloc instead */
    else 
    {
        newtable = (struct entry **)
                   realloc(h->table, newsize * sizeof(struct entry *));
        if (NULL == newtable) {
            (h->primeindex)--;
            // Release global write lock for early return
            rwlock_wrunlock(&h->globallock);
            return 0;
        }
        h->table = newtable;
        memset(newtable[h->tablelength], 0, newsize - h->tablelength);
        for (i = 0; i < h->tablelength; i++) {
            for (pE = &(newtable[i]), e = *pE; e != NULL; e = *pE) {
                index = indexFor(newsize,e->h);
                if (index == i)
                {
                    pE = &(e->next);
                }
                else
                {
                    *pE = e->next;
                    e->next = newtable[index];
                    newtable[index] = e;
                }
            }
        }
    }

#ifdef DEBUG
    printf("resizing fine-grained rwlock array to %d locks.\n", newsize);
#endif
    // Realloc more rwlocks for newly resized table
    h->locks = (pthread_rwlock_t *) realloc(h->locks, sizeof(pthread_rwlock_t) * newsize);
    for(unsigned int i = h->num_locks; i < newsize; ++i) {
        if (pthread_rwlock_init(&h->locks[i], NULL)) {
            perror("pthread_rwlock_init");
            exit(1);
        }
    }
    h->num_locks = newsize;

    h->tablelength = newsize;
    h->loadlimit   = (unsigned int) ceil(newsize * max_load_factor);

    // Release global write lock
    rwlock_wrunlock(&h->globallock);

    return -1; /* non-zero return indicates success */
}
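
hashtable_expand redistributes entries with indexFor, which in Clark-style hashtables is a plain modulo over the current table length. A sketch of that helper follows, under the assumption this codebase follows suit.

/* Sketch of the index helper: map a precomputed hash value onto a
 * bucket of a table of the given length. The exact definition in
 * these codebases may differ (e.g. masking for power-of-two sizes). */
static unsigned int
indexFor(unsigned int tablelength, unsigned int hashvalue)
{
    return hashvalue % tablelength;
}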