/** Insert a free fragment into a per-pool free-list bucket.
 *
 * Lock-free path: the list push and the fragment counter update are
 * atomic operations.  The pool bitmap bit for this bucket is written
 * only when it is still clear, since an atomic OR on an already-set
 * bit would needlessly trash the cache line.
 *
 * @param pool  destination pool
 * @param hash  small-size hash bucket index
 * @param frag  fragment being inserted
 */
static inline void sfm_pool_insert(struct sfm_pool* pool, int hash,
									struct sfm_frag* frag)
{
	unsigned long bit;

	frag_push(&pool->pool_hash[hash].first, frag);
	atomic_inc_long((long*)&pool->pool_hash[hash].no);
	bit = HASH_TO_BITMAP(hash);
	/* set the bitmap bit only if it is not already set (avoids an
	 * expensive cache-trashing atomic write op) */
	if ((atomic_get_long((long*)&pool->bitmap) & bit) == 0)
		atomic_or_long((long*)&pool->bitmap, bit);
}
static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag, int split) { struct sfm_frag** f; unsigned long p_id; int hash; unsigned long hash_bit; if (likely(frag->size<=SF_POOL_MAX_SIZE)){ hash=GET_SMALL_HASH(frag->size); if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))== (unsigned long)-1)){ /* add it back to the "main" hash */ SFM_MAIN_HASH_LOCK(qm, hash); frag->id=(unsigned long)(-1); /* main hash marker */ /*insert it here*/ frag_push(&(qm->free_hash[hash].first), frag); qm->free_hash[hash].no++; /* set it only if not already set (avoids an expensive * cache trashing atomic write op) */ hash_bit=HASH_TO_BITMAP(hash); if (!(atomic_get_long((long*)&qm->bitmap) & hash_bit)) atomic_or_long((long*)&qm->bitmap, hash_bit); SFM_MAIN_HASH_UNLOCK(qm, hash); }else{ /* add it to one of the pools pool */ sfm_pool_insert(&qm->pool[p_id], hash, frag); } }else{ hash=GET_BIG_HASH(frag->size); SFM_MAIN_HASH_LOCK(qm, hash); f=&(qm->free_hash[hash].first); for(; *f; f=&((*f)->u.nxt_free)) if (frag->size <= (*f)->size) break; frag->id=(unsigned long)(-1); /* main hash marker */ /*insert it here*/ frag->u.nxt_free=*f; *f=frag; qm->free_hash[hash].no++; /* inc. big hash free size ? */ SFM_MAIN_HASH_UNLOCK(qm, hash); } }