/* check if the nonce with the given id is expected/valid and if so mark it
 * "seen"
 * returns: 0 - ok, < 0 on error:
 *          OTN_INV_POOL    (pool number is invalid/corrupted)
 *          OTN_ID_OVERFLOW (crt_id has overflowed with the partition size
 *                           since the id was generated)
 *          OTN_REPLAY      (nonce id seen before => replay)
 */
enum otn_check_ret otn_check_id(nid_t id, unsigned pool)
{
    unsigned int i;
    unsigned n, b;
    otn_cell_t v, b_mask;

    if (unlikely(pool>=nid_pool_no))
        return OTN_INV_POOL;
    if (unlikely(otn_id_check_overflow(id, pool)))
        return OTN_ID_OVERFLOW;

    n=get_otn_array_bit_idx(id, pool); /* n-th bit */
    i=get_otn_array_cell_idx(n);       /* array index i, corresponding to n */
    b=get_otn_cell_bit(n);             /* bit pos corresponding to n */
    b_mask= (otn_cell_t)1<<b;

#ifdef OTN_CELL_T_LONG
    v=atomic_get_long(&otn_array[i]);
    if (unlikely(v & b_mask))
        return OTN_REPLAY;
    atomic_or_long((long*)&otn_array[i], b_mask);
#else
    v=atomic_get_int(&otn_array[i]);
    if (unlikely(v & b_mask))
        return OTN_REPLAY;
    atomic_or_int((int*)&otn_array[i], b_mask);
#endif /* OTN_CELL_T_LONG */
    return 0;
}
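/* Usage sketch (illustrative, not part of the original file): how a caller
 * might react to the different otn_check_id() results. The function name,
 * "msg_id" and "msg_pool" are hypothetical; they stand for values recovered
 * from a received nonce. Guarded with #if 0 since it is documentation only.
 */
#if 0
static int example_check_nonce(nid_t msg_id, unsigned msg_pool)
{
    switch(otn_check_id(msg_id, msg_pool)){
        case 0:               /* first use of this id => accept */
            return 0;
        case OTN_REPLAY:      /* id already marked "seen" => reject as replay */
        case OTN_ID_OVERFLOW: /* id too old => re-challenge with a fresh nonce */
        case OTN_INV_POOL:    /* invalid/corrupted pool number => reject */
        default:
            return -1;
    }
}
#endif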
static inline void sfm_pool_insert(struct sfm_pool* pool, int hash,
                                    struct sfm_frag* frag)
{
    unsigned long hash_bit;

    frag_push(&pool->pool_hash[hash].first, frag);
    atomic_inc_long((long*)&pool->pool_hash[hash].no);
    /* set it only if not already set (avoids an expensive
     * cache-thrashing atomic write op) */
    hash_bit=HASH_TO_BITMAP(hash);
    if (!(atomic_get_long((long*)&pool->bitmap) & hash_bit))
        atomic_or_long((long*)&pool->bitmap, hash_bit);
}
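/* Minimal sketch of the "test before atomic set" pattern used for the bitmap
 * above (the function name and signature are illustrative, not from this
 * file): the plain read keeps the common case, bit already set, read-only,
 * so the bitmap's cache line is not invalidated on every insert. */
#if 0
static inline void set_bit_once(unsigned long* bitmap, unsigned long b_mask)
{
    /* cheap read first; only fall through to the atomic OR (an exclusive,
     * cache-line-invalidating write) when the bit is still clear */
    if (!(atomic_get_long((long*)bitmap) & b_mask))
        atomic_or_long((long*)bitmap, b_mask);
}
#endif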
static inline void sfm_insert_free(struct sfm_block* qm, struct sfm_frag* frag,
                                    int split)
{
    struct sfm_frag** f;
    unsigned long p_id;
    int hash;
    unsigned long hash_bit;

    if (likely(frag->size<=SF_POOL_MAX_SIZE)){
        hash=GET_SMALL_HASH(frag->size);
        if (unlikely((p_id=sfm_choose_pool(qm, frag, hash, split))==
                        (unsigned long)-1)){
            /* add it back to the "main" hash */
            SFM_MAIN_HASH_LOCK(qm, hash);
            frag->id=(unsigned long)(-1); /* main hash marker */
            /* insert it here */
            frag_push(&(qm->free_hash[hash].first), frag);
            qm->free_hash[hash].no++;
            /* set it only if not already set (avoids an expensive
             * cache-thrashing atomic write op) */
            hash_bit=HASH_TO_BITMAP(hash);
            if (!(atomic_get_long((long*)&qm->bitmap) & hash_bit))
                atomic_or_long((long*)&qm->bitmap, hash_bit);
            SFM_MAIN_HASH_UNLOCK(qm, hash);
        }else{
            /* add it to one of the pools */
            sfm_pool_insert(&qm->pool[p_id], hash, frag);
        }
    }else{
        hash=GET_BIG_HASH(frag->size);
        SFM_MAIN_HASH_LOCK(qm, hash);
        f=&(qm->free_hash[hash].first);
        /* keep the bucket ordered by size: stop at the first fragment
         * at least as big as frag */
        for(; *f; f=&((*f)->u.nxt_free))
            if (frag->size <= (*f)->size)
                break;
        frag->id=(unsigned long)(-1); /* main hash marker */
        /* insert it here */
        frag->u.nxt_free=*f;
        *f=frag;
        qm->free_hash[hash].no++;
        /* inc. big hash free size ? */
        SFM_MAIN_HASH_UNLOCK(qm, hash);
    }
}
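/* Standalone sketch of the pointer-to-pointer ordered insert used by the
 * big-hash branch above (the struct and names are illustrative, not the
 * allocator's own types). Walking the link fields themselves lets one splice
 * into an ascending singly linked list without special-casing the head. */
#if 0
struct node{
    unsigned long size;
    struct node* next;
};

static void sorted_insert(struct node** head, struct node* n)
{
    struct node** f;

    /* on exit, *f is either the first element with size >= n->size
     * or the NULL tail pointer */
    for (f=head; *f; f=&((*f)->next))
        if (n->size <= (*f)->size)
            break;
    /* splicing through *f also covers the empty-list and
     * insert-at-head cases */
    n->next=*f;
    *f=n;
}
#endif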