int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	ring->descs_unaligned = dma_alloc_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			&ring->base_addr_unaligned, GFP_KERNEL);

	if (!ring->descs_unaligned) {
		printk(KERN_ERR "Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned, ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
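/* For symmetry, a minimal sketch of the matching free path, assuming the
 * standard dma_alloc_coherent()/dma_free_coherent() pairing used above:
 * the unaligned size, CPU pointer and bus address from the allocation are
 * what get handed back, not the aligned values derived from them. */
void vnic_dev_free_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring)
{
	if (ring->descs) {
		dma_free_coherent(&vdev->pdev->dev,
			ring->size_unaligned,
			ring->descs_unaligned,
			ring->base_addr_unaligned);
		ring->descs = NULL;
	}
}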
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;
	unsigned int count = rq->ring.desc_count;

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {
		(*buf_clean)(rq, buf);
		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	vnic_dev_clear_desc_ring(&rq->ring);
}
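/* The bufs[block][offset] lookup above splits the flat hardware
 * fetch_index into a buffer-block number and an offset inside that
 * block. A standalone sketch of the arithmetic, assuming blocks of 64
 * entries (32 for rings smaller than 64 descriptors, matching the
 * mainline VNIC_RQ_BUF_BLK_ENTRIES() definition); the macro name below
 * is a local stand-in. */
#include <stdio.h>

#define BUF_BLK_ENTRIES(count) ((count) < 64 ? 32u : 64u)

int main(void)
{
	unsigned int count = 256;	/* ring descriptor count */
	unsigned int fetch_index = 130;	/* flat index read from hardware */

	/* same div/mod split as rq->bufs[fi / BLK(count)][fi % BLK(count)] */
	printf("fetch_index %u -> bufs[%u][%u]\n", fetch_index,
		fetch_index / BUF_BLK_ENTRIES(count),
		fetch_index % BUF_BLK_ENTRIES(count));	/* bufs[2][2] */
	return 0;
}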
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq *wq, struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;

	BUG_ON(ioread32(&wq->ctrl->enable));

	buf = wq->to_clean;

	while (vnic_wq_desc_used(wq) > 0) {
		(*buf_clean)(wq, buf);
		buf = wq->to_clean = buf->next;
		wq->ring.desc_avail++;
	}

	wq->to_use = wq->to_clean = wq->bufs[0];

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;

	BUG_ON(ioread32(&rq->ctrl->enable));

	buf = rq->to_clean;

	while (vnic_rq_desc_used(rq) > 0) {
		(*buf_clean)(rq, buf);
		buf = rq->to_clean = buf->next;
		rq->ring.desc_avail++;
	}

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES];
	iowrite32(fetch_index, &rq->ctrl->posted_index);
	rq->buf_index = 0;

	vnic_dev_clear_desc_ring(&rq->ring);
}
void vnic_rq_clean(struct vnic_rq *rq,
	void (*buf_clean)(struct vnic_rq *rq, struct vnic_rq_buf *buf))
{
	struct vnic_rq_buf *buf;
	u32 fetch_index;
	unsigned int count = rq->ring.desc_count;
	int i;

	buf = rq->to_clean;

	for (i = 0; i < rq->ring.desc_count; i++) {
		(*buf_clean)(rq, buf);
		buf = buf->next;
	}
	rq->ring.desc_avail = rq->ring.desc_count - 1;

	/* Use current fetch_index as the ring starting point */
	fetch_index = ioread32(&rq->ctrl->fetch_index);
	if (fetch_index == 0xFFFFFFFF) { /* check for hardware gone */
		/* Hardware surprise removal: reset fetch_index */
		fetch_index = 0;
	}
	rq->to_use = rq->to_clean =
		&rq->bufs[fetch_index / VNIC_RQ_BUF_BLK_ENTRIES(count)]
			[fetch_index % VNIC_RQ_BUF_BLK_ENTRIES(count)];
	iowrite32(fetch_index, &rq->ctrl->posted_index);

	/* Anytime we write fetch_index, we need to re-write 0 to rq->enable
	 * to re-sync internal VIC state.
	 */
	iowrite32(0, &rq->ctrl->enable);

	vnic_dev_clear_desc_ring(&rq->ring);
}
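/* A sketch of the buf_clean callback shape that the vnic_rq_clean()
 * variants above expect: unmap the buffer's DMA mapping and free the
 * attached skb. struct example_priv and its pdev field are hypothetical
 * stand-ins for whatever the driver stores behind vnic_dev_priv();
 * buf->os_buf, buf->dma_addr and buf->len follow struct vnic_rq_buf. */
struct example_priv {			/* hypothetical driver-private state */
	struct pci_dev *pdev;
};

static void example_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
{
	struct example_priv *priv = vnic_dev_priv(rq->vdev);

	if (!buf->os_buf)
		return;			/* nothing was posted in this slot */

	dma_unmap_single(&priv->pdev->dev, buf->dma_addr, buf->len,
			DMA_FROM_DEVICE);
	dev_kfree_skb_any(buf->os_buf);
	buf->os_buf = NULL;
}

/* at teardown time: vnic_rq_clean(rq, example_free_rq_buf); */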
int vnic_dev_alloc_desc_ring(struct vnic_dev *vdev, struct vnic_dev_ring *ring,
	unsigned int desc_count, unsigned int desc_size)
{
	vnic_dev_desc_ring_size(ring, desc_count, desc_size);

	/* Legacy PCI DMA API variant of the allocation above; newer kernels
	 * use dma_alloc_coherent() instead of pci_alloc_consistent().
	 */
	ring->descs_unaligned = pci_alloc_consistent(vdev->pdev,
		ring->size_unaligned,
		&ring->base_addr_unaligned);

	if (!ring->descs_unaligned) {
		vdev_err(vdev, "Failed to allocate ring (size=%d), aborting\n",
			(int)ring->size);
		return -ENOMEM;
	}

	ring->base_addr = ALIGN(ring->base_addr_unaligned, ring->base_align);
	ring->descs = (u8 *)ring->descs_unaligned +
		(ring->base_addr - ring->base_addr_unaligned);

	vnic_dev_clear_desc_ring(ring);

	ring->desc_avail = ring->desc_count - 1;

	return 0;
}
void vnic_wq_clean(struct vnic_wq *wq,
	void (*buf_clean)(struct vnic_wq_buf *buf))
{
	struct vnic_wq_buf *buf;
	unsigned int to_clean = wq->tail_idx;

	buf = &wq->bufs[to_clean];

	while (vnic_wq_desc_used(wq) > 0) {
		(*buf_clean)(buf);
		to_clean = buf_idx_incr(wq->ring.desc_count, to_clean);
		buf = &wq->bufs[to_clean];
		wq->ring.desc_avail++;
	}

	wq->head_idx = 0;
	wq->tail_idx = 0;
	wq->last_completed_index = 0;
	*((uint32_t *)wq->cqmsg_rz->addr) = 0;

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}
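/* The variant above walks the ring by index rather than by buf->next
 * pointers. A minimal standalone sketch of the wrap-around increment
 * that buf_idx_incr() is assumed to perform; the real helper lives in
 * the driver's headers, and this version only illustrates the
 * branch-based wrap instead of a modulo. */
#include <stdint.h>

static inline uint32_t buf_idx_incr(uint32_t n_descriptors, uint32_t idx)
{
	idx++;
	if (idx == n_descriptors)
		idx = 0;		/* wrap back to the first descriptor */
	return idx;
}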
void vnic_cq_clean(struct vnic_cq *cq)
{
	cq->to_clean = 0;
	cq->last_color = 0;

	iowrite32(0, &cq->ctrl->cq_head);
	iowrite32(0, &cq->ctrl->cq_tail);
	/* Tail color starts at 1 so the first completions hardware writes
	 * differ from last_color (0) and are recognized as new.
	 */
	iowrite32(1, &cq->ctrl->cq_tail_color);

	vnic_dev_clear_desc_ring(&cq->ring);
}
void vnic_wq_copy_clean(struct vnic_wq_copy *wq,
	void (*q_clean)(struct vnic_wq_copy *wq,
	struct fcpio_host_req *wq_desc))
{
	BUG_ON(ioread32(&wq->ctrl->enable));

	/* A completed_index of -1 asks the service routine to drain
	 * every in-use descriptor.
	 */
	if (vnic_wq_copy_desc_in_use(wq))
		vnic_wq_copy_service(wq, -1, q_clean);

	wq->to_use_index = wq->to_clean_index = 0;

	iowrite32(0, &wq->ctrl->fetch_index);
	iowrite32(0, &wq->ctrl->posted_index);
	iowrite32(0, &wq->ctrl->error_status);

	vnic_dev_clear_desc_ring(&wq->ring);
}