static Atom reserveAtom() { size_t index; #ifdef O_ATOMGC /* try to find a hole! */ int i; int last = FALSE; Atom a; unsigned int ref; int idx; for(index=GD->atoms.no_hole_before, i=MSB(index); !last; i++) { size_t upto = (size_t)2<<i; Atom b = GD->atoms.array.blocks[i]; if ( upto >= GD->atoms.highest ) { upto = GD->atoms.highest; last = TRUE; } for(; index<upto; index++) { a = b + index; ref = a->references; if ( ATOM_IS_FREE(ref) && COMPARE_AND_SWAP(&a->references, ref, ATOM_RESERVED_REFERENCE) ) { GD->atoms.no_hole_before = index+1; a->atom = (index<<LMASK_BITS)|TAG_ATOM; return a; } } } GD->atoms.no_hole_before = index+1; #endif /*O_ATOMGC*/ redo: index = GD->atoms.highest; idx = MSB(index); assert(index >= 0); if ( !GD->atoms.array.blocks[idx] ) { allocateAtomBlock(idx); } a = &GD->atoms.array.blocks[idx][index]; ref = a->references; if ( ATOM_IS_FREE(ref) && COMPARE_AND_SWAP(&a->references, ref, ATOM_RESERVED_REFERENCE) ) { ATOMIC_INC(&GD->atoms.highest); a->atom = (index<<LMASK_BITS)|TAG_ATOM; return a; } goto redo; }
/* Acquire a recursive spin lock.
 *
 * l     : the lock.  Ownership is recorded in l->owner (a pthread_t,
 *         with 0 meaning "unowned").
 * count : 0  -> spin until the lock is acquired;
 *         >0 -> spin at most `count' rounds, then fall back to
 *               blocking on l->mtx (l->lock_by_mtx records which).
 * Returns 0 in all paths.
 *
 * NOTE(review): the `== 0' tests suggest COMPARE_AND_SWAP here returns
 * the *old* value (__sync_val_compare_and_swap convention), unlike the
 * boolean-result usage elsewhere in this file — confirm the macro.
 * NOTE(review): rand() is not required to be thread-safe and takes a
 * lock in some libcs; a per-thread PRNG would be cheaper in a spin loop.
 */
int32_t spin_lock(spinlock_t l,int32_t count)
{
    pthread_t tid = pthread_self();
    /* Recursive acquire: the owner only bumps the nesting count. */
    if(tid == l->owner)
    {
        ++l->lock_count;
        return 0;
    }
    int32_t c = 0;
    int32_t max = 0;
    if(count == 0)
    {
        /* Unbounded spin: CAS 0 -> tid until we win ownership. */
        while(1)
        {
            if(l->owner == 0)
            {
                if(COMPARE_AND_SWAP(&(l->owner),0,tid) == 0)
                    break;      /* old value was 0: we took the lock */
            }
            /* Randomized backoff: up to 127 `pause' hints per round. */
            for(c = 0; c < (max = rand()%128); ++c)
                __asm__("pause");
        };
        __sync_synchronize();   /* full barrier before touching payload */
        ++l->lock_count;
        l->lock_by_mtx = 0;     /* acquired by spinning, not the mutex */
        return 0;
    }
    else
    {
        /* Bounded spin, then block on the embedded mutex. */
        int32_t _l = 0;         /* 1 iff the spin phase won the lock */
        while((count--) > 0)
        {
            if(l->owner == 0)
            {
                if(COMPARE_AND_SWAP(&(l->owner),0,tid) == 0)
                {
                    _l = 1;
                    break;
                }
            }
            for(c = 0; c < (max = rand()%128); ++c)
                __asm__("pause");
        }
        __sync_synchronize();
        if(_l == 0)
        {
            /* Spin budget exhausted: block until the holder releases. */
            mutex_lock(l->mtx);
            l->lock_by_mtx = 1;
        }
        ++l->lock_count;
        return 0;
    }
}
/* reserve_indirect() claims a free slot in `tab' for a new interned
 * indirect term and returns the initialised entry (via create_indirect()).
 * Lock-free: slots are claimed by CAS-ing the reference word from a free
 * state to INDIRECT_RESERVED_REFERENCE.
 *
 * Phase 1 scans for a hole (a previously freed slot) starting at
 * tab->no_hole_before; phase 2 appends at tab->highest, retrying until
 * the CAS on the freshly exposed slot succeeds.
 */
static indirect *
reserve_indirect(indirect_table *tab, word val ARG_LD)
{ size_t index;
  int i;
  int last = FALSE;

  /* Phase 1: walk the blocks from the first possibly-free index. */
  for(index=tab->no_hole_before, i=MSB(index); !last; i++)
  { size_t upto = (size_t)2<<i;		/* exclusive end of block i */
    indirect *b = tab->array.blocks[i];

    if ( upto >= tab->highest )
    { upto = tab->highest;
      last = TRUE;
    }

    for(; index<upto; index++)
    { indirect *a = b + index;
      unsigned int refs = a->references;

      if ( INDIRECT_IS_FREE(refs) &&
	   COMPARE_AND_SWAP(&a->references, refs, INDIRECT_RESERVED_REFERENCE) )
      { tab->no_hole_before = index+1;

	return create_indirect(a, index, val PASS_LD);
      }
    }
  }
  tab->no_hole_before = tab->highest;	/* scanned everything: no holes */

  /* Phase 2: grow at the high-water mark; loop until our CAS wins. */
  for(;;)
  { int idx;
    indirect *a;
    unsigned int refs;

    index = tab->highest;
    idx = MSB(index);

    if ( !tab->array.blocks[idx] )
      allocate_indirect_block(tab, idx);

    a = &tab->array.blocks[idx][index];
    refs = a->references;

    if ( INDIRECT_IS_FREE(refs) &&
	 COMPARE_AND_SWAP(&a->references, refs, INDIRECT_RESERVED_REFERENCE) )
    { ATOMIC_INC(&tab->highest);	/* publish the new high mark */

      return create_indirect(a, index, val PASS_LD);
    }
  }
}
/* cast2refobj() converts a weak identifier into a strong refobj pointer.
 * Returns the object with its refcount incremented, or NULL when the
 * identity no longer matches (the object was reused/destroyed) or the
 * stored pointer is NULL.
 *
 * Protocol: spin-acquire the per-object flag (CAS 0 -> 1), re-check the
 * identity *under the flag*, and only then bump the refcount.  A refcount
 * of 0 at that point means destruction already started, so the increment
 * is rolled back.  The TRY/CATCH guards against faulting on a pointer to
 * memory that has been unmapped — presumably a project-specific SEH-style
 * mechanism; verify its semantics against the project's exception macros.
 */
refobj *cast2refobj(ident _ident)
{
    refobj *ptr = NULL;
    if(unlikely(!_ident.ptr))
        return NULL;
    TRY{
        refobj *o = (refobj*)_ident.ptr;
        uint32_t c = 0;
        struct timespec ts;
        /* Optimistic check; loop while the identity still matches. */
        while(_ident.identity == o->identity){
            if(COMPARE_AND_SWAP(&o->flag,0,1)){
                /* Flag held: re-validate identity before taking a ref. */
                if(_ident.identity == o->identity){
                    if(__sync_fetch_and_add(&o->refcount,1) > 0)
                        ptr = o;          /* object still alive */
                    else
                        o->refcount = 0;  /* dying: undo our increment */
                }
                o->flag = 0;              /* release the flag */
                break;
            }else{
                /* Flag busy: pause-spin, then sleep 0.5ms every 4000 tries. */
                if(++c < 4000){
                    __asm__("pause");
                }else{
                    c = 0;
                    ts.tv_sec = 0;
                    ts.tv_nsec = 500000;
                    nanosleep(&ts, NULL);
                }
            }
        }
    }CATCH_ALL{
        ptr = NULL;                       /* faulted: treat as dead */
    }ENDTRY;
    return ptr;
}
/* lookupFunctorDef() returns the functor handle for name/arity, creating
 * the FunctorDef if it does not exist yet.  Lock-free lookup with a CAS
 * insert at the bucket head; the LOCK()ed section only covers rehashing.
 * Any failed validation restarts from `redo:'.
 *
 * NOTE(review): the DEBUG format uses %d for `arity' which is size_t —
 * should be %zu if these messages are ever enabled.
 * NOTE(review): the failure path frees with PL_free() an object obtained
 * from allocHeapOrHalt(); confirm the two allocators pair up.
 */
functor_t
lookupFunctorDef(atom_t atom, size_t arity)
{ GET_LD
  int v;
  FunctorDef *table;
  int buckets;
  FunctorDef f, head;

redo:
  /* Pin the current table/bucket-count pair for this thread. */
  acquire_functor_table(table, buckets);

  v = (int)pointerHashValue(atom, buckets);
  head = table[v];			/* remembered for the CAS below */

  DEBUG(9, Sdprintf("Lookup functor %s/%d = ", stringAtom(atom), arity));
  for(f = table[v]; f; f = f->next)
  { if (atom == f->name && f->arity == arity)
    { DEBUG(9, Sdprintf("%p (old)\n", f));
      if ( !FUNCTOR_IS_VALID(f->flags) )
      { /* found a partially constructed entry: retry until visible */
	goto redo;
      }
      release_functor_table();
      return f->functor;
    }
  }

  /* Not found.  Grow the table first if it is getting crowded. */
  if ( functorDefTable->buckets * 2 < GD->statistics.functors )
  { LOCK();
    rehashFunctors();
    UNLOCK();
  }

  /* Bucket or table changed since we sampled them: start over. */
  if ( !( head == table[v] && table == functorDefTable->table ) )
    goto redo;

  f = (FunctorDef) allocHeapOrHalt(sizeof(struct functorDef));
  f->functor = 0L;
  f->name    = atom;
  f->arity   = arity;
  f->flags   = 0;
  f->next    = table[v];
  /* Publish at the bucket head; undo and retry if we lost a race or a
   * rehash swapped the table underneath us.
   */
  if ( !( COMPARE_AND_SWAP(&table[v], head, f) &&
	  table == functorDefTable->table) )
  { PL_free(f);
    goto redo;
  }
  registerFunctor(f);

  ATOMIC_INC(&GD->statistics.functors);
  PL_register_atom(atom);		/* functor keeps its name alive */

  DEBUG(9, Sdprintf("%p (new)\n", f));

  release_functor_table();
  return f->functor;
}
/* intern_indirect() returns the canonical handle for the indirect term
 * `val' (big integer, float, string, ...) from `tab'.  If an equal term
 * is already interned its reference count is bumped and its handle is
 * returned; otherwise, when `create' is true, a new entry is reserved and
 * published with a CAS on the bucket head.  Returns 0 when not found and
 * `create' is false.
 *
 * The whole operation is optimistic: after any rehash or lost CAS the
 * outer for(;;) restarts against the then-current table.
 */
word
intern_indirect(indirect_table *tab, word val, int create ARG_LD)
{ Word idata = addressIndirect(val);	/* points at header */
  size_t isize = wsizeofInd(*idata);	/* include header */
  unsigned int key = MurmurHashAligned2(idata+1, isize*sizeof(word), MURMUR_SEED);
  indirect_buckets *buckets;

  for(;;)
  { buckets = acquire_itable_buckets(tab);
    unsigned int ki = key & (buckets->size-1);
    indirect *head = buckets->buckets[ki];	/* sampled for CAS below */
    indirect *h;

    acquire_itable_bucket(&buckets->buckets[ki]);
    for(h=buckets->buckets[ki]; h; h = h->next)
    { unsigned int ref = h->references;

      /* Match on header word + payload bytes; only count valid entries. */
      if ( INDIRECT_IS_VALID(ref) &&
	   idata[0] == h->header &&
	   memcmp(idata+1, h->data, isize*sizeof(word)) == 0 )
      { if ( bump_ref(h, ref) )
	{ release_itable_buckets();
	  return h->handle;
	}
      }
    }

    /* Not found: rehash first if the table is overloaded. */
    if ( TIGHT(buckets, tab) )
    { simpleMutexLock(&tab->mutex);
      rehash_indirect_table(tab);
      simpleMutexUnlock(&tab->mutex);
    }

    if ( buckets != tab->table || head != buckets->buckets[ki] )
      continue;				/* try again */

    if ( create )
    { indirect *h = reserve_indirect(tab, val PASS_LD);

      h->next = buckets->buckets[ki];
      /* Publish at the bucket head; on failure roll the slot back to
       * free and retry the whole lookup.
       */
      if ( !COMPARE_AND_SWAP(&buckets->buckets[ki], head, h) ||
	   buckets != tab->table )
      { PL_free(h->data);
	h->references = 0;
	continue;			/* try again */
      }
      h->references = 1 | INDIRECT_VALID_REFERENCE | INDIRECT_RESERVED_REFERENCE;
      ATOMIC_INC(&tab->count);
      release_itable_buckets();

      return h->handle;
    } else
    { release_itable_buckets();

      return 0;
    }
  }
}
/* One-time initialisation of the TLS subsystem: creates the pthread key,
 * the guard mutex and the list of per-thread hash maps.  The CAS on
 * `is_init' ensures at most one thread performs the initialisation.
 *
 * NOTE(review): this is unsafe double-checked initialisation.  A thread
 * that loses the CAS (or sees is_init already 1) returns immediately and
 * may use thread_key/tls_mtx/tls_list *before* the winner has finished
 * creating them.  A proper fix needs a separate "init complete" state
 * (and clear_tls() uses the same flag, so both must change together).
 */
void init_tls()
{
    if(!is_init)
    {
        /* Only the thread whose CAS flips 0 -> 1 initialises. */
        if(COMPARE_AND_SWAP(&is_init,0,1))
        {
            pthread_key_create(&thread_key,0);  /* no destructor; cleanup is manual */
            tls_mtx = mutex_create();
            tls_list = list_create(sizeof(hash_map_t));
        }
    }
}
/* Atomically add one reference to `h', starting from the observed count
 * `refs'.  Returns TRUE on success and FALSE as soon as the entry is seen
 * in a non-valid (freed/dying) state, in which case no reference is taken.
 */
static int
bump_ref(indirect *h, unsigned int refs)
{ while ( !COMPARE_AND_SWAP(&h->references, refs, refs+1) )
  { refs = h->references;		/* lost the race: reload and retry */

    if ( !INDIRECT_IS_VALID(refs) )
      return FALSE;			/* entry died under us */
  }

  return TRUE;
}
/* Shut down the global logging system: signal the writer thread to stop,
 * wait for it to drain, close every open log file, then tear down the
 * mutex, list and thread objects and free the system itself.
 * g_log_system is NULL afterwards.  Not safe to call concurrently with
 * logging calls or with itself (no guard on g_log_system).
 */
void close_log_system()
{
    /* Flip is_close 0 -> 1 atomically so the worker sees the request
     * exactly once; stop the writer thread and wait for it to finish.
     */
    COMPARE_AND_SWAP(&(g_log_system->is_close),0,1);
    thread_join(g_log_system->worker_thread);
    /* Worker is gone; drain and destroy the remaining log files. */
    mutex_lock(g_log_system->mtx);
    while(!link_list_is_empty(g_log_system->log_files))
    {
        log_t l = LINK_LIST_POP(log_t,g_log_system->log_files);
        destroy_log(&l);
    }
    mutex_unlock(g_log_system->mtx);
    mutex_destroy(&g_log_system->mtx);
    destroy_link_list(&g_log_system->log_files);
    destroy_thread(&g_log_system->worker_thread);
    free(g_log_system);
    g_log_system = 0;   /* mark the subsystem as closed */
}
/* Tear down the TLS subsystem created by init_tls(): delete the pthread
 * key and destroy every registered per-thread hash map, then the guard
 * mutex and the list.  The CAS on `is_init' (1 -> 0) ensures only one
 * caller performs the teardown.
 *
 * NOTE(review): the `== 1' test suggests COMPARE_AND_SWAP returns the old
 * value here — confirm against the macro, since other code in this file
 * uses the boolean-result convention.
 * NOTE(review): threads still using their TLS after the CAS race with
 * this teardown; see the matching note on init_tls().
 */
void clear_tls()
{
    if(is_init)
    {
        /* Old value 1 means we flipped the flag and own the teardown. */
        if(COMPARE_AND_SWAP(&is_init,1,0) == 1)
        {
            pthread_key_delete(thread_key);
            mutex_lock(tls_mtx);
            /* Destroy every per-thread map registered in tls_list. */
            list_iter it = list_begin(tls_list);
            list_iter end = list_end(tls_list);
            for( ; !IT_LIST_EQUAL(it,end); IT_LIST_NEXT(it))
            {
                hash_map_t h = IT_LIST_GET(hash_map_t,it);
                hash_map_destroy(&h);
            }
            mutex_unlock(tls_mtx);
            mutex_destroy(&tls_mtx);
            list_destroy(&tls_list);
        }
    }
}
/* Drop one reference from `r'; returns the post-decrement count.  When it
 * reaches zero the destructor runs — but only after `identity' is cleared
 * and the object's flag has been acquired, which fences out concurrent
 * cast2refobj() callers revalidating the identity under that flag.
 * The flag is deliberately never released: the object is being destroyed.
 */
uint32_t refobj_dec(refobj *r)
{
    volatile uint32_t count;
    uint32_t c;
    struct timespec ts;
    assert(r->refcount > 0);
    if((count = ATOMIC_DECREASE(&r->refcount)) == 0){
        /* Last reference: invalidate the identity, then wait for the
         * flag so no cast2refobj() is mid-revalidation when we destroy.
         */
        for(c = 0,r->identity = 0;;){
            if(COMPARE_AND_SWAP(&r->flag,0,1))
                break;
            /* Flag busy: pause-spin, sleep 0.5ms every 4000 attempts. */
            if(++c < 4000){
                __asm__("pause");
            }else{
                c = 0;
                ts.tv_sec = 0;
                ts.tv_nsec = 500000;
                nanosleep(&ts, NULL);
            }
        }
        r->destructor(r);
    }
    return count;
}