/* Get a free fragment of (at least) bucket 'hash' from 'pool'.
 *
 * Tries the exact bucket first; on a miss it walks the higher buckets,
 * using the pool's bitmap to skip empty bucket ranges. The chosen
 * fragment is detached with frag_pop(), marked taken, trimmed to 'size'
 * via sfm_split_frag() and returned. Returns 0 when the whole pool has
 * nothing usable (pool->missed is then bumped).
 *
 * Lock-free variant: list manipulation goes through frag_pop() and all
 * counters (no/misses/hits/missed) are updated with atomic ops.
 *
 * NOTE(review): the leading #endif closes a DBG_F_MALLOC #ifdef that
 * selects an alternate (file/func/line) signature above this view.
 * NOTE(review): 'pool_id' is not declared here — presumably provided by
 * that hidden signature or a macro; confirm against the full file. */
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
                                             struct sfm_pool* pool,
                                             int hash, unsigned long size)
#endif
{
    int r;
    int next_block;
    struct sfm_frag* volatile* f;
    struct sfm_frag* frag;
    unsigned long b;
    unsigned long eob;

    /* special case for r=hash */
    r=hash;
    f=&pool->pool_hash[r].first;
    /* detach it from the free list */
    if ((frag=frag_pop((struct sfm_frag**)f))==0)
        goto not_found;
found:
    atomic_dec_long((long*)&pool->pool_hash[r].no);
    frag->u.nxt_free=0; /* mark it as 'taken' */
    frag->id=pool_id;
#ifdef DBG_F_MALLOC
    sfm_split_frag(qm, frag, size, file, func, line);
#else
    sfm_split_frag(qm, frag, size);
#endif
    /* count a hit only when serving from this cpu's own pool */
    if (&qm->pool[pool_id]==pool)
        atomic_inc_long((long*)&pool->hits);
    return frag;
not_found:
    atomic_inc_long((long*)&pool->pool_hash[r].misses);
    r++;
    b=HASH_BIT_POS(r);
    /* scan the remaining buckets, skipping empty ranges via the bitmap */
    while(r<SF_HASH_POOL_SIZE){
        b=_next_set_bit(b, &pool->bitmap);
        next_block=_hash_range(b, &eob);
        /* never move backwards: resume from max(r, next_block) */
        r=(r<next_block)?next_block:r;
        for (; r<eob; r++){
            f=&pool->pool_hash[r].first;
            if ((frag=frag_pop((struct sfm_frag**)f))!=0)
                goto found;
            atomic_inc_long((long*)&pool->pool_hash[r].misses);
        }
        b++;
    }
    atomic_inc_long((long*)&pool->missed);
    return 0;
}
/* Release a handle: clear its 'assigned' bit (refcount bit <00>) and,
 * if that was the last reference, free the table entry and drop the
 * reference the entry held on the underlying object.
 * Returns the status from find_handle (OZ_SUCCESS if released). */
uLong oz_knl_handle_release (OZ_Handle handle, OZ_Procmode procmode)

{
  Handleent *entry;
  Long newrefc;
  OZ_Handletbl *tbl;
  uLong status;
  void *freedobj;

  tbl = OZ_SYS_PDATA_FROM_KNL (OZ_PROCMODE_KNL) -> handletbl;
  OZ_KNL_CHKOBJTYPE (tbl, OZ_OBJTYPE_HANDLETBL);

  freedobj = NULL;                                                      /* assume entry is still in use by someone */

  LOCKTABLE_EX (tbl);                                                   /* exclusive access so free_handle may be called */
  status = find_handle (tbl, handle, procmode, &entry);                 /* locate the entry being released */
  if (status == OZ_SUCCESS) {
    newrefc = atomic_inc_long (&(entry -> refcount), -1);               /* found: clear the assigned bit (refcount<00>) */
    if (newrefc == 0) {                                                 /* all references gone? */
      freedobj = free_handle (tbl, handle & tbl -> curmask);            /* then free the entry and get the object pointer */
    }
  }
  UNLOCKTABLE_EX (tbl);                                                 /* done with the table */

  if (freedobj != NULL) {
    OBJINCREFC (OZ_KNL_GETOBJTYPE (freedobj), freedobj, -1);            /* entry was freed: drop its object reference */
  }
  return (status);
}
/* Put back a handle previously taken out with oz_knl_handle_takeout:
 * drop one reference (units of 2, leaving the assigned bit <00> alone).
 * If the count reaches zero the entry is freed under the exclusive lock
 * and the object reference it held is released.
 * Crashes if the count would go negative (putback without takeout). */
void oz_knl_handle_putback (OZ_Handle handle)

{
  Handleent *ent;
  Long refc;
  OZ_Handle idx;
  OZ_Handletbl *tbl;
  void *obj;

  tbl = OZ_SYS_PDATA_FROM_KNL (OZ_PROCMODE_KNL) -> handletbl;
  OZ_KNL_CHKOBJTYPE (tbl, OZ_OBJTYPE_HANDLETBL);

  LOCKTABLE_SH (tbl);                                   /* shared lock on the handle table */
  idx = handle & tbl -> curmask;                        /* handle's index in the table */
  ent = tbl -> ents + idx;                              /* entry in question */
  refc = atomic_inc_long (&(ent -> refcount), -2);      /* decrement ref count, leave bit <00> alone */
  UNLOCKTABLE_SH (tbl);                                 /* unlock the handle table */

  if (refc > 0) return;                                 /* still referenced - nothing more to do */
  if (refc < 0) oz_crash ("oz_knl_handle_putback: he %p -> refcount %d", ent, refc);

  LOCKTABLE_EX (tbl);                                   /* count hit zero: get exclusive access */
  idx = handle & tbl -> curmask;                        /* re-derive index (table may have moved) */
  ent = tbl -> ents + idx;
  obj = free_handle (tbl, idx);                         /* release the entry */
  UNLOCKTABLE_EX (tbl);
  OBJINCREFC (OZ_KNL_GETOBJTYPE (obj), obj, -1);        /* release the object pointer */
}
/* Take out a handle for use: look it up, validate object type and
 * security access mask, then (on success, when object_r != NULL) bump
 * the entry's reference count by 2 (leaving the assigned bit <00>
 * alone) and return the object pointer and, optionally, the access
 * mask. A successful takeout must be balanced by oz_knl_handle_putback.
 *
 *   handle      = handle to take out
 *   procmode    = processor mode of the caller
 *   secaccmsk   = access bits the caller requires
 *   objtype     = required object type (OZ_OBJTYPE_UNKNOWN = any)
 *   object_r    = where to return the object pointer (or NULL)
 *   secaccmsk_r = where to return the entry's access mask (or NULL)
 *
 * Returns OZ_SUCCESS, or the find_handle error, or OZ_BADHANDOBJTYPE /
 * OZ_SECACCDENIED. */
uLong oz_knl_handle_takeout (OZ_Handle handle, OZ_Procmode procmode, OZ_Secaccmsk secaccmsk, OZ_Objtype objtype, void **object_r, OZ_Secaccmsk *secaccmsk_r)

{
  Handleent *he;
  OZ_Handletbl *handletbl;          /* fix: removed unused local 'Long tablelock;' */
  uLong sts;

  handletbl = OZ_SYS_PDATA_FROM_KNL (OZ_PROCMODE_KNL) -> handletbl;
  OZ_KNL_CHKOBJTYPE (handletbl, OZ_OBJTYPE_HANDLETBL);

  LOCKTABLE_SH (handletbl);                                             // lock for shared access
  sts = find_handle (handletbl, handle, procmode, &he);                 /* find the handle in question */
  if ((sts == OZ_SUCCESS)                                               /* make sure object type matches */
   && (objtype != OZ_OBJTYPE_UNKNOWN)
   && (he -> objtype != objtype)) sts = OZ_BADHANDOBJTYPE;
  if ((sts == OZ_SUCCESS) && (secaccmsk & ~(he -> secaccmsk))) {        /* ok, check its security attributes */
    sts = OZ_SECACCDENIED;
  }
  if ((sts == OZ_SUCCESS) && (object_r != NULL)) {
    *object_r = he -> object;                                           /* ok, return object pointer */
    atomic_inc_long (&(he -> refcount), 2);                             /* inc entry ref count, leave bit <00> alone */
    if (secaccmsk_r != NULL) *secaccmsk_r = he -> secaccmsk;            /* maybe return security access mask */
  }
  UNLOCKTABLE_SH (handletbl);                                           // release shared lock
  return (sts);
}
/* Push 'frag' onto the pool's free list for bucket 'hash', bump the
 * bucket's fragment count and make sure the bucket's bit is set in the
 * pool bitmap. */
static inline void sfm_pool_insert (struct sfm_pool* pool, int hash,
                                    struct sfm_frag* frag)
{
    unsigned long bit;

    frag_push(&pool->pool_hash[hash].first, frag);
    atomic_inc_long((long*)&pool->pool_hash[hash].no);
    /* write the bitmap only when the bit is not yet set: avoids an
     * expensive cache-trashing atomic write on the common path */
    bit=HASH_TO_BITMAP(hash);
    if ((atomic_get_long((long*)&pool->bitmap) & bit)==0)
        atomic_or_long((long*)&pool->bitmap, bit);
}
static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag, int split) { struct sfm_frag** f; unsigned long p_id; int hash; unsigned long hash_bit; if (likely(frag->size<=SF_POOL_MAX_SIZE)){ hash=GET_SMALL_HASH(frag->size); if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))== (unsigned long)-1)){ /* add it back to the "main" hash */ frag->id=(unsigned long)(-1); /* main hash marker */ /*insert it here*/ frag_push(&(qm->free_hash[hash].first), frag); atomic_inc_long((long*)&qm->free_hash[hash].no); /* set it only if not already set (avoids an expensive * cache trashing atomic write op) */ hash_bit=HASH_TO_BITMAP(hash); if (!(atomic_get_long((long*)&qm->bitmap) & hash_bit)) atomic_or_long((long*)&qm->bitmap, hash_bit); }else{ /* add it to one of the pools pool */ sfm_pool_insert(&qm->pool[p_id], hash, frag); } }else{ hash=GET_BIG_HASH(frag->size); SFM_MAIN_HASH_LOCK(qm, hash); f=&(qm->free_hash[hash].first); for(; *f; f=&((*f)->u.nxt_free)) if (frag->size <= (*f)->size) break; frag->id=(unsigned long)(-1); /* main hash marker */ /*insert it here*/ frag->u.nxt_free=*f; *f=frag; qm->free_hash[hash].no++; /* inc. big hash free size ? */ SFM_MAIN_HASH_UNLOCK(qm, hash); } }
/* Get a free fragment of (at least) bucket 'hash' from 'pool'.
 * Per-bucket-lock variant of pool_get_frag: a bucket's first pointer is
 * checked lock-free, then re-checked under SFM_POOL_LOCK before the
 * fragment is popped (the 'goto found' jumps are taken WITH the bucket
 * lock held; 'found' releases it after the pop).
 * Returns the trimmed fragment, or 0 if nothing usable is in the pool.
 *
 * NOTE(review): the leading #endif closes a DBG_F_MALLOC #ifdef that
 * selects an alternate (file/func/line) signature above this view.
 * NOTE(review): 'pool_id' is not declared here — presumably provided by
 * that hidden signature or a macro; confirm against the full file. */
static inline struct sfm_frag* pool_get_frag(struct sfm_block* qm,
                                             struct sfm_pool* pool,
                                             int hash, unsigned long size)
#endif
{
    int r;
    int next_block;
    struct sfm_frag* volatile* f;
    struct sfm_frag* frag;
    unsigned long b;
    unsigned long eob;

    /* special case for r=hash */
    r=hash;
    f=&pool->pool_hash[r].first;
    if (*f==0)
        goto not_found;
    SFM_POOL_LOCK(pool, r);
    /* re-check under the lock: another thread may have emptied it */
    if (unlikely(*f==0)){
        SFM_POOL_UNLOCK(pool, r);
        goto not_found;
    }
found:
    /* detach it from the free list (bucket lock for r is held here) */
    frag=frag_pop((struct sfm_frag**)f);
    frag->u.nxt_free=0; /* mark it as 'taken' */
    frag->id=pool_id;
    pool->pool_hash[r].no--;
    SFM_POOL_UNLOCK(pool, r);
#ifdef DBG_F_MALLOC
    sfm_split_frag(qm, frag, size, file, func, line);
#else
    sfm_split_frag(qm, frag, size);
#endif
    /* count a hit only when serving from this cpu's own pool */
    if (&qm->pool[pool_id]==pool)
        atomic_inc_long((long*)&pool->hits);
    return frag;
not_found:
    atomic_inc_long((long*)&pool->pool_hash[r].misses);
    r++;
    b=HASH_BIT_POS(r);
    /* scan the remaining buckets, skipping empty ranges via the bitmap */
    while(r<SF_HASH_POOL_SIZE){
        b=_next_set_bit(b, &pool->bitmap);
        next_block=_hash_range(b, &eob);
        /* never move backwards: resume from max(r, next_block) */
        r=(r<next_block)?next_block:r;
        for (; r<eob; r++){
            f=&pool->pool_hash[r].first;
            if (*f){
                SFM_POOL_LOCK(pool, r);
                if (unlikely(*f==0)){
                    /* not found */
                    SFM_POOL_UNLOCK(pool, r);
                }else
                    goto found;
            }
            atomic_inc_long((long*)&pool->pool_hash[r].misses);
        }
        b++;
    }
#if 0
    /* EXPENSIVE BUG CHECK: verify the bitmap never hides a non-empty
     * bucket (disabled; linear rescan of all buckets) */
    for (r=hash; r<SF_HASH_POOL_SIZE; r++){
        f=&pool->pool_hash[r].first;
        if (*f){
            SFM_POOL_LOCK(pool, r);
            if (unlikely(*f==0)){
                /* not found */
                SFM_POOL_UNLOCK(pool, r);
            }else{
                b=_next_set_bit(HASH_BIT_POS(r), &pool->bitmap);
                next_block=_hash_range(b, &eob);
                BUG("pool_get_frag: found fragm. %d at %d (bit %ld range %ld-%ld), next set bit=%ld"
                    " bitmap %ld (%p)\n", hash, r, HASH_BIT_POS(r), next_block, eob, b,
                    pool->bitmap, &pool->bitmap);
                goto found;
            }
        }
    }
#endif
    atomic_inc_long((long*)&pool->missed);
    return 0;
}
/* Take an additional reference on 'fromtext'; NULL is a no-op. */
static void fromtext_acquire(fromtext_t *fromtext)
{
    if (fromtext == NULL) {
        return;
    }
    atomic_inc_long(&fromtext->ref_count);
}
/* Take an additional reference on the GEOS context; NULL is a no-op. */
static void geos_context_acquire(geos_context_t *ctx)
{
    if (ctx == NULL) {
        return;
    }
    atomic_inc_long(&ctx->ref_count);
}