/*
 * Take a reference on receive buffer pool @pool of @dev; on the first
 * reference (ref_count 0 -> 1), initialize the pool's skb queue, program
 * the uPD98401 free-buffer-pool descriptor registers, and fill the pool.
 */
static void use_pool(struct atm_dev *dev,int pool)
{
	struct zatm_dev *zatm_dev;
	unsigned long flags;
	int size;

	zatm_dev = ZATM_DEV(dev);
	/* Post-increment: body runs only when the old ref_count was zero,
	 * i.e. this is the first user of the pool. */
	if (!(zatm_dev->pool_info[pool].ref_count++)) {
		skb_queue_head_init(&zatm_dev->pool[pool]);
		/* Encode buffer size for the BFSZ field: pool index relative to
		 * the AAL5 base, clamped to the hardware's supported range. */
		size = pool-ZATM_AAL5_POOL_BASE;
		if (size < 0) size = 0; /* 64B... */
		else if (size > 10) size = 10; /* ... 64kB */
		/* Program the two-word pool descriptor in adapter memory;
		 * zpokel presumably needs the device lock held — writes must
		 * not interleave with other register accesses. */
		spin_lock_irqsave(&zatm_dev->lock, flags);
		zpokel(zatm_dev,((zatm_dev->pool_info[pool].low_water/4) <<
		    uPD98401_RXFP_ALERT_SHIFT) |
		    (1 << uPD98401_RXFP_BTSZ_SHIFT) |
		    (size << uPD98401_RXFP_BFSZ_SHIFT),
		    zatm_dev->pool_base+pool*2);
		/* Second descriptor word: NOTE(review) 'dummy' looks like a
		 * placeholder buffer address — confirm against the data sheet. */
		zpokel(zatm_dev,(unsigned long) dummy,zatm_dev->pool_base+
		    pool*2+1);
		spin_unlock_irqrestore(&zatm_dev->lock, flags);
		zatm_dev->last_free[pool] = NULL;
		/* Stock the freshly initialized pool with buffers. */
		refill_pool(dev,pool);
	}
	DPRINTK("pool %d: %d\n",pool,zatm_dev->pool_info[pool].ref_count);
}
void * memory_pool_malloc(struct memory_pool * pool, unsigned int bytes) { if (bytes < POOL_LARGE_ALLOC) { void * ptr; if (pool->head + bytes > pool->end) refill_pool(pool); assert(pool->head + bytes <= pool->end); ptr = pool->head; pool->head += bytes; pool->head = (unsigned char*)(((unsigned long)pool->head + POOL_ALIGN - 1) & ~(POOL_ALIGN - 1)); return ptr; } else { struct memory_block * block = (struct memory_block*)malloc(bytes + sizeof(struct memory_block)); block->next = pool->blocks; pool->blocks = block; return (block + 1); } }