/* intern_indirect() - return the canonical handle for the indirect term
 * `val` (big integer, string, ...) from the interning table `tab`.
 *
 * The table is a lock-free hash table of reference-counted `indirect`
 * entries.  We first search the bucket chain for an existing entry with
 * the same header and payload; if found and its reference count can be
 * bumped, that handle is returned.  Otherwise, if `create` is non-zero,
 * a new entry is reserved and published into the bucket head with a
 * compare-and-swap.  Any interference (concurrent rehash, bucket head
 * changed under us, CAS failure) restarts the whole loop.
 *
 * Returns the interned handle, or 0 when not found and `create` is FALSE.
 *
 * NOTE(review): relies on acquire_itable_buckets()/release_itable_buckets()
 * providing safe access to the bucket array during concurrent rehash --
 * presumably an epoch/han­dover scheme; confirm against their definitions.
 */
word
intern_indirect(indirect_table *tab, word val, int create ARG_LD)
{ Word idata = addressIndirect(val);	/* points at header */
  size_t isize = wsizeofInd(*idata);	/* include header */
  unsigned int key = MurmurHashAligned2(idata+1, isize*sizeof(word),
					MURMUR_SEED);
  indirect_buckets *buckets;

  for(;;)
  { buckets = acquire_itable_buckets(tab);
    unsigned int ki = key & (buckets->size-1);	/* size is a power of two */
    indirect *head = buckets->buckets[ki];	/* snapshot for CAS/retry check */
    indirect *h;

    acquire_itable_bucket(&buckets->buckets[ki]);
    /* Search the chain for an existing, still-valid matching entry */
    for(h=buckets->buckets[ki]; h; h = h->next)
    { unsigned int ref = h->references;

      if ( INDIRECT_IS_VALID(ref) &&
	   idata[0] == h->header &&
	   memcmp(idata+1, h->data, isize*sizeof(word)) == 0 )
      { if ( bump_ref(h, ref) )	/* may fail if entry is being reclaimed */
	{ release_itable_buckets();
	  return h->handle;
	}
      }
    }

    /* Grow the table if the load factor is too high */
    if ( TIGHT(buckets, tab) )
    { simpleMutexLock(&tab->mutex);
      rehash_indirect_table(tab);
      simpleMutexUnlock(&tab->mutex);
    }

    /* If the table was rehashed or the bucket head changed while we
     * searched, our snapshot is stale: restart.
     */
    if ( buckets != tab->table || head != buckets->buckets[ki] )
      continue;			/* try again */

    if ( create )
    { indirect *h = reserve_indirect(tab, val PASS_LD);

      /* Publish the new entry at the bucket head; verify the table was
       * not swapped out by a concurrent rehash after the CAS.
       */
      h->next = buckets->buckets[ki];
      if ( !COMPARE_AND_SWAP(&buckets->buckets[ki], head, h) ||
	   buckets != tab->table )
      { PL_free(h->data);	/* lost the race: discard and retry */
	h->references = 0;
	continue;		/* try again */
      }
      h->references = 1 | INDIRECT_VALID_REFERENCE | INDIRECT_RESERVED_REFERENCE;
      ATOMIC_INC(&tab->count);
      release_itable_buckets();
      return h->handle;
    } else
    { release_itable_buckets();
      return 0;			/* not present; caller asked not to create */
    }
  }
}
/* zown() - make the calling thread the owner of zipper `z`, acquiring
 * its mutex if this thread does not own it already.
 *
 * Fix: also reset `lock_count` when the mutex is acquired.  The sibling
 * zlock()/zunlock() pair maintains a recursive acquisition count and
 * zlock() sets it to 1 on first acquisition; taking ownership here while
 * leaving the previous owner's stale count in place would make a later
 * zunlock() decrement a count that does not reflect this thread's single
 * acquisition.
 */
static void
zown(zipper *z)
{ int tid = PL_thread_self();

  if ( z->owner != tid )
  { simpleMutexLock(&z->lock);
    z->owner = tid;
    z->lock_count = 1;		/* one outstanding acquisition, as in zlock() */
  }
}
/* zlock() - acquire the zipper's lock for the calling thread.
 *
 * The lock is recursive per thread: if the caller already owns `z`,
 * only the acquisition count is bumped; otherwise the underlying mutex
 * is taken, ownership is recorded and the count starts at 1.  Each
 * successful zlock() must be balanced by a matching unlock.
 *
 * Always returns TRUE.
 */
static int
zlock(zipper *z)
{ int self = PL_thread_self();

  if ( z->owner == self )
  { z->lock_count++;		/* recursive re-acquisition by the owner */
  } else
  { simpleMutexLock(&z->lock);	/* first acquisition: may block */
    z->owner = self;
    z->lock_count = 1;
  }

  return TRUE;
}
/* rdf_thread_info() - return the per-thread administration record for
 * thread `tid`, lazily allocating both the containing block and the
 * record itself.
 *
 * Storage is a two-level array: blocks[MSB(tid)] points to a block that
 * is indexed directly by `tid` (the block pointer is stored shifted by
 * the block length so blocks[idx][tid] addresses the right slot).
 * Both allocations use double-checked locking: an unlocked fast-path
 * test, then a re-test under qa->query.lock before allocating.
 *
 * NOTE(review): the unlocked reads pair with the MEMORY_BARRIER() before
 * publishing `ti`; statement order here is significant -- do not reorder.
 */
thread_info *
rdf_thread_info(rdf_db *db, int tid)
{ query_admin *qa = &db->queries;
  per_thread *td = &qa->query.per_thread;
  thread_info *ti;
  size_t idx = MSB(tid);

  /* Lazily allocate the block that covers this tid range */
  if ( !td->blocks[idx] )
  { simpleMutexLock(&qa->query.lock);
    if ( !td->blocks[idx] )		/* re-check under the lock */
    { size_t bs = BLOCKLEN(idx);
      thread_info **newblock = rdf_malloc(db, bs*sizeof(thread_info*));

      memset(newblock, 0, bs*sizeof(thread_info*));
      td->blocks[idx] = newblock-bs;	/* shift so blocks[idx][tid] works */
    }
    simpleMutexUnlock(&qa->query.lock);
  }

  /* Lazily allocate the record for this thread */
  if ( !(ti=td->blocks[idx][tid]) )
  { simpleMutexLock(&qa->query.lock);
    if ( !(ti=td->blocks[idx][tid]) )	/* re-check under the lock */
    { ti = rdf_malloc(db, sizeof(*ti));
      memset(ti, 0, sizeof(*ti));
      init_query_stack(db, &ti->queries);
      MEMORY_BARRIER();			/* init must be visible before publish */
      td->blocks[idx][tid] = ti;
      if ( tid > qa->query.thread_max )
	qa->query.thread_max = tid;
    }
    simpleMutexUnlock(&qa->query.lock);
  }

  return ti;
}
/* allocate_indirect_block() - ensure slot `idx` of the table's block
 * array points at a zero-filled block of 2^idx `indirect` structs.
 *
 * The pointer is stored shifted back by the block size, so callers can
 * index the block directly with a global entry number in the range
 * [2^idx, 2^(idx+1)).  Allocation is serialised on tab->mutex; a block
 * that already exists is left untouched.  Aborts via outOfCore() when
 * memory is exhausted.
 */
static void
allocate_indirect_block(indirect_table *tab, int idx)
{ simpleMutexLock(&tab->mutex);

  if ( tab->array.blocks[idx] == NULL )
  { size_t count = (size_t)1<<idx;
    indirect *block = PL_malloc(count*sizeof(*block));

    if ( !block )
      outOfCore();
    memset(block, 0, count*sizeof(*block));
    tab->array.blocks[idx] = block - count;	/* shifted base pointer */
  }

  simpleMutexUnlock(&tab->mutex);
}