/*
 * Allocate one object from the pool.
 *
 * Pops the first free slot off the pool's free list; if the list is empty,
 * grows the pool by allocating a new chunk (double the previous chunk's
 * capacity, capped at 1024 items) and retries from the refilled free list.
 *
 * Returns a pointer to the object payload (the bytes immediately after the
 * slot's nxp_object header — see `return obj+1`), or 0 if the chunk
 * allocation failed.
 */
void* nxp_alloc(nxp_pool* pool) {
  nxp_object* obj=pool->free_first;
  if (!obj) {
    /* Grow: double the most recent chunk's item count, capped at 1024. */
    int nitems=pool->chunk->nitems*2;
    if (nitems>1024) nitems=1024;
    /* BUG FIX: every slot consists of an nxp_object header followed by
     * object_size payload bytes (nxp_create sizes its initial chunk as
     * (sizeof(nxp_object)+object_size)*n, and this function returns obj+1).
     * The grown chunk was sized with object_size alone, under-allocating
     * by sizeof(nxp_object) per item — a heap overflow once the chunk is
     * carved into slots. Include the header in the per-item footprint. */
    nxp_chunk* chunk=nx_alloc(offsetof(nxp_chunk, pool)+nitems*(sizeof(nxp_object)+pool->object_size));
    if (!chunk) {
      nxweb_log_error("nx_pool: alloc obj[%d] chunk failed", nitems);
      return 0;
    }
    chunk->nitems=nitems;
    chunk->prev=pool->chunk;
    /* Chunk ids are 1-based and monotonically increasing. */
    chunk->id=chunk->prev? chunk->prev->id+1 : 1;
    pool->chunk=chunk;
    nxp_init_chunk(pool); /* threads the new chunk's slots onto the free list */
    obj=pool->free_first;
  }
  /* Unlink the head of the free list. */
  if (obj->next) {
    pool->free_first=obj->next;
    obj->next->prev=0;
  }
  else {
    pool->free_first=pool->free_last=0;
  }
  obj->in_use=1;
  obj->prev=0;
  obj->next=0;
  return obj+1; /* payload starts right after the slot header */
}
nxp_pool* nxp_create(int object_size, int initial_chunk_size) { object_size=(object_size+7)&~0x7; // align to 8 bytes int alloc_size=(sizeof(nxp_object)+object_size)*initial_chunk_size; nxp_pool* pool=nx_alloc(sizeof(nxp_pool)+sizeof(nxp_chunk)+alloc_size); nxp_init(pool, object_size, (void*)(pool+1), sizeof(nxp_chunk)+alloc_size); return pool; }
/*
 * Allocate and initialize VMkernel RX bounce buffers for every receive
 * descriptor ring of the given RX context.
 *
 * For each ring: one DMA-coherent region is allocated, carved into
 * fixed-stride buffers (stride = the ring's dma_size), and each buffer is
 * threaded onto the ring's free list. Ring 0 gets MAX_VMK_BOUNCE buffers;
 * the other rings get MAX_VMK_BOUNCE/16 (the backing region is still sized
 * for MAX_VMK_BOUNCE entries, so the smaller rings simply leave the tail
 * unused).
 *
 * Returns 0 on success (or immediately for P3 hardware, which needs no
 * bounce buffers), -1 if any ring's allocation fails. On failure, regions
 * already allocated for earlier rings are left for the caller to tear down.
 */
inline int nx_setup_rx_vmkbounce_buffers(struct unm_adapter_s * adapter,
                                         nx_host_rx_ctx_t *nxhal_host_rx_ctx)
{
    int i, ring;
    struct vmk_bounce *bounce = NULL;
    nx_host_rds_ring_t *nxhal_host_rds_ring = NULL;
    rds_host_ring_t *host_rds_ring = NULL;
    void *vaddr_off;
    unsigned char *vcur; /* byte cursor for carving the region into buffers */
    uint64_t dmaddr_off;
    unsigned int len;

    /* P3 hardware does not use bounce buffers. */
    if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
        return 0;
    }

    for (ring = 0; ring < NUM_RCV_DESC_RINGS; ring++) {
        nxhal_host_rds_ring = &nxhal_host_rx_ctx->rds_rings[ring];
        host_rds_ring = (rds_host_ring_t *) nxhal_host_rds_ring->os_data;
        bounce = &host_rds_ring->vmk_bounce;
        bounce->max = (ring ? (MAX_VMK_BOUNCE / 16) : MAX_VMK_BOUNCE);
        len = host_rds_ring->dma_size * MAX_VMK_BOUNCE;
        bounce->len = len;
        bounce->index = 0;
        vaddr_off = nx_alloc(adapter, len, (dma_addr_t *)&dmaddr_off,
                             &bounce->pdev);
        if (vaddr_off == NULL) {
            printk (KERN_WARNING"%s:%s failed to alloc rx bounce buffers for device %s \n", unm_nic_driver_name, __FUNCTION__, adapter->netdev->name);
            return -1;
        }
        bounce->vaddr_off = vaddr_off;
        bounce->dmaddr_off = dmaddr_off;
        TAILQ_INIT (&bounce->free_vmk_bounce);
        /* BUG FIX: the original advanced the void* vaddr_off directly;
         * arithmetic on void* is a GNU extension and undefined in ISO C.
         * Walk the region with an unsigned char* cursor instead. */
        vcur = (unsigned char *)vaddr_off;
        for (i = 0; i < (bounce->max); i++) {
            bounce->buf[i].data = vcur;
            bounce->buf[i].phys = dmaddr_off;
            bounce->buf[i].busy = 0;
            bounce->buf[i].index = i;
            TAILQ_INSERT_TAIL(&bounce->free_vmk_bounce, &(bounce->buf[i]),
                              link);
            vcur += host_rds_ring->dma_size;
            dmaddr_off += host_rds_ring->dma_size;
        }
        spin_lock_init(&bounce->lock);
    }
    return 0;
}
inline int nx_setup_tx_vmkbounce_buffers(struct unm_adapter_s * adapter) { int i; void *vaddr_off; uint64_t dmaddr_off; unsigned int len; struct vmk_bounce *bounce = &adapter->vmk_bounce; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { return 0; } adapter->bounce = 1; len = PAGE_SIZE * MAX_VMK_BOUNCE; bounce->len = len; bounce->index = 0; bounce->max = MAX_VMK_BOUNCE; vaddr_off = nx_alloc(adapter, len, (dma_addr_t *)&dmaddr_off, &bounce->pdev); if (vaddr_off == NULL){ printk (KERN_WARNING"%s:%s failed to alloc tx bounce buffers for device %s \n", unm_nic_driver_name, __FUNCTION__, adapter->netdev->name); return -1; } bounce->vaddr_off = vaddr_off; bounce->dmaddr_off = dmaddr_off; TAILQ_INIT (&bounce->free_vmk_bounce); for (i = 0; i < (bounce->max); i++) { bounce->buf[i].data = vaddr_off; bounce->buf[i].phys = dmaddr_off; bounce->buf[i].busy = 0; bounce->buf[i].index = i; TAILQ_INSERT_TAIL(&bounce->free_vmk_bounce, &(bounce->buf[i]), link); vaddr_off += PAGE_SIZE; dmaddr_off += PAGE_SIZE; } spin_lock_init(&bounce->lock); return 0; }
inline int nx_setup_vlan_buffers(struct unm_adapter_s * adapter) { int i; if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { return 0; } struct unm_cmd_buffer *cmd_buf = adapter->cmd_buf_arr; for (i = 0; i < (adapter->MaxTxDescCount); i++) { cmd_buf[i].vlan_buf.data = nx_alloc(adapter, HDR_CP * sizeof(uint8_t), (dma_addr_t *)&(cmd_buf[i].vlan_buf.phys), &(cmd_buf[i].pdev)); if (cmd_buf[i].vlan_buf.data == NULL) return -1; } return 0; }